author     Terje Bergstrom <tbergstrom@nvidia.com>              2017-03-30 10:44:03 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-04-10 22:04:19 -0400
commit     3ba374a5d94f8c2067731155afaf79f03e6c390c (patch)
tree       d8a2bd0d52b1e8862510aedeb7529944c0b7e28e /drivers/gpu/nvgpu/gk20a
parent     2be51206af88aba6662cdd9de5bd6c18989bbcbd (diff)
gpu: nvgpu: gk20a: Use new error macro
gk20a_err() and gk20a_warn() require a struct device pointer, which is
not portable across operating systems. The new nvgpu_err() and
nvgpu_warn() macros take a struct gk20a pointer instead. Convert the
code to use the more portable macros.

JIRA NVGPU-16

Change-Id: Ia51f36d94c5ce57a5a0ab83b3c83a6bce09e2d5c
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1331694
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
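The conversion itself is mechanical. As a rough sketch of the pattern applied
throughout (the two wrapper functions below are hypothetical; only the logging
calls and the "failed to read ptimer" message mirror the actual hunks):

	/* Before: gk20a_err() needs the Linux-specific struct device pointer. */
	static void report_ptimer_timeout_old(struct gk20a *g)
	{
		gk20a_err(dev_from_gk20a(g), "failed to read ptimer");
	}

	/* After: nvgpu_err()/nvgpu_warn() from <nvgpu/log.h> take the
	 * struct gk20a pointer directly, so no OS-specific type is needed. */
	static void report_ptimer_timeout_new(struct gk20a *g)
	{
		nvgpu_err(g, "failed to read ptimer");
	}

Each converted file also gains an #include <nvgpu/log.h> for the new macros, as
the hunks below show.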
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/bus_gk20a.c            |   3
-rw-r--r--  drivers/gpu/nvgpu/gk20a/cde_gk20a.c            | 109
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ce2_gk20a.c            |  16
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c        |  49
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c   |  28
-rw-r--r--  drivers/gpu/nvgpu/gk20a/clk_gk20a.c            |  13
-rw-r--r--  drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c         |  14
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c    |   4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c        |  84
-rw-r--r--  drivers/gpu/nvgpu/gk20a/debug_gk20a.c          |   5
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c     |  14
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c           | 100
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.c                |  44
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a_scale.c          |   6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c         |   8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c     |   5
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c             | 329
-rw-r--r--  drivers/gpu/nvgpu/gk20a/hal.c                  |   4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/hal_gk20a.c            |   4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ltc_gk20a.c            |   8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c             | 119
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c            | 208
-rw-r--r--  drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c      |   5
-rw-r--r--  drivers/gpu/nvgpu/gk20a/regops_gk20a.c         |  12
-rw-r--r--  drivers/gpu/nvgpu/gk20a/sched_gk20a.c          |   7
-rw-r--r--  drivers/gpu/nvgpu/gk20a/sim_gk20a.c            |  26
-rw-r--r--  drivers/gpu/nvgpu/gk20a/tsg_gk20a.c            |   6
27 files changed, 600 insertions, 630 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/bus_gk20a.c b/drivers/gpu/nvgpu/gk20a/bus_gk20a.c
index dd96df16..3119e373 100644
--- a/drivers/gpu/nvgpu/gk20a/bus_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/bus_gk20a.c
@@ -17,6 +17,7 @@
 #include <soc/tegra/chip-id.h>

 #include <nvgpu/page_allocator.h>
+#include <nvgpu/log.h>

 #include "gk20a.h"

@@ -126,7 +127,7 @@ int gk20a_read_ptimer(struct gk20a *g, u64 *value)
 	}

 	/* too many iterations, bail out */
-	gk20a_err(dev_from_gk20a(g), "failed to read ptimer");
+	nvgpu_err(g, "failed to read ptimer");
 	return -EBUSY;
 }

diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index 16baaa39..296a8af0 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -28,6 +28,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/nvgpu_common.h>
 #include <nvgpu/kmem.h>
+#include <nvgpu/log.h>

 #include "gk20a.h"
 #include "channel_gk20a.h"
@@ -228,19 +229,20 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
 			     struct gk20a_cde_hdr_buf *buf)
 {
 	struct nvgpu_mem *mem;
+	struct gk20a *g = cde_ctx->g;
 	int err;

 	/* check that the file can hold the buf */
 	if (buf->data_byte_offset != 0 &&
 	    buf->data_byte_offset + buf->num_bytes > img->size) {
-		gk20a_warn(cde_ctx->dev, "cde: invalid data section. buffer idx = %d",
+		nvgpu_warn(g, "cde: invalid data section. buffer idx = %d",
 			   cde_ctx->num_bufs);
 		return -EINVAL;
 	}

 	/* check that we have enough buf elems available */
 	if (cde_ctx->num_bufs >= MAX_CDE_BUFS) {
-		gk20a_warn(cde_ctx->dev, "cde: invalid data section. buffer idx = %d",
+		nvgpu_warn(g, "cde: invalid data section. buffer idx = %d",
 			   cde_ctx->num_bufs);
 		return -ENOMEM;
 	}
@@ -249,7 +251,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
 	mem = cde_ctx->mem + cde_ctx->num_bufs;
 	err = nvgpu_dma_alloc_map_sys(cde_ctx->vm, buf->num_bytes, mem);
 	if (err) {
-		gk20a_warn(cde_ctx->dev, "cde: could not allocate device memory. buffer idx = %d",
+		nvgpu_warn(g, "cde: could not allocate device memory. buffer idx = %d",
 			   cde_ctx->num_bufs);
 		return -ENOMEM;
 	}
@@ -267,6 +269,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
 static int gk20a_replace_data(struct gk20a_cde_ctx *cde_ctx, void *target,
 			      int type, s32 shift, u64 mask, u64 value)
 {
+	struct gk20a *g = cde_ctx->g;
 	u32 *target_mem_ptr = target;
 	u64 *target_mem_ptr_u64 = target;
 	u64 current_value, new_value;
@@ -287,7 +290,7 @@ static int gk20a_replace_data(struct gk20a_cde_ctx *cde_ctx, void *target,
 		current_value = (u64)(current_value >> 32) |
 				(u64)(current_value << 32);
 	} else {
-		gk20a_warn(cde_ctx->dev, "cde: unknown type. type=%d",
+		nvgpu_warn(g, "cde: unknown type. type=%d",
 			   type);
 		return -EINVAL;
 	}
@@ -315,13 +318,14 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
 {
 	struct nvgpu_mem *source_mem;
 	struct nvgpu_mem *target_mem;
+	struct gk20a *g = cde_ctx->g;
 	u32 *target_mem_ptr;
 	u64 vaddr;
 	int err;

 	if (replace->target_buf >= cde_ctx->num_bufs ||
 	    replace->source_buf >= cde_ctx->num_bufs) {
-		gk20a_warn(cde_ctx->dev, "cde: invalid buffer. target_buf=%u, source_buf=%u, num_bufs=%d",
+		nvgpu_warn(g, "cde: invalid buffer. target_buf=%u, source_buf=%u, num_bufs=%d",
 			   replace->target_buf, replace->source_buf,
 			   cde_ctx->num_bufs);
 		return -EINVAL;
@@ -333,7 +337,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,

 	if (source_mem->size < (replace->source_byte_offset + 3) ||
 	    target_mem->size < (replace->target_byte_offset + 3)) {
-		gk20a_warn(cde_ctx->dev, "cde: invalid buffer offsets. target_buf_offs=%lld, source_buf_offs=%lld, source_buf_size=%zu, dest_buf_size=%zu",
+		nvgpu_warn(g, "cde: invalid buffer offsets. target_buf_offs=%lld, source_buf_offs=%lld, source_buf_size=%zu, dest_buf_size=%zu",
 			   replace->target_byte_offset,
 			   replace->source_byte_offset,
 			   source_mem->size,
@@ -350,7 +354,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
 				  replace->shift, replace->mask,
 				  vaddr);
 	if (err) {
-		gk20a_warn(cde_ctx->dev, "cde: replace failed. err=%d, target_buf=%u, target_buf_offs=%lld, source_buf=%u, source_buf_offs=%lld",
+		nvgpu_warn(g, "cde: replace failed. err=%d, target_buf=%u, target_buf_offs=%lld, source_buf=%u, source_buf_offs=%lld",
 			   err, replace->target_buf,
 			   replace->target_byte_offset,
 			   replace->source_buf,
@@ -438,7 +442,7 @@ static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx)
 					  param->shift, param->mask, new_data);

 		if (err) {
-			gk20a_warn(cde_ctx->dev, "cde: patch failed. err=%d, idx=%d, id=%d, target_buf=%u, target_buf_offs=%lld, patch_value=%llu",
+			nvgpu_warn(g, "cde: patch failed. err=%d, idx=%d, id=%d, target_buf=%u, target_buf_offs=%lld, patch_value=%llu",
 				   err, i, param->id, param->target_buf,
 				   param->target_byte_offset, new_data);
 			return err;
@@ -453,9 +457,10 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
 			       struct gk20a_cde_hdr_param *param)
 {
 	struct nvgpu_mem *target_mem;
+	struct gk20a *g = cde_ctx->g;

 	if (param->target_buf >= cde_ctx->num_bufs) {
-		gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. param idx = %d, target_buf=%u, num_bufs=%u",
+		nvgpu_warn(g, "cde: invalid buffer parameter. param idx = %d, target_buf=%u, num_bufs=%u",
 			   cde_ctx->num_params, param->target_buf,
 			   cde_ctx->num_bufs);
 		return -EINVAL;
@@ -463,7 +468,7 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,

 	target_mem = cde_ctx->mem + param->target_buf;
 	if (target_mem->size < (param->target_byte_offset + 3)) {
-		gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. param idx = %d, target_buf_offs=%lld, target_buf_size=%zu",
+		nvgpu_warn(g, "cde: invalid buffer parameter. param idx = %d, target_buf_offs=%lld, target_buf_size=%zu",
 			   cde_ctx->num_params, param->target_byte_offset,
 			   target_mem->size);
 		return -EINVAL;
@@ -471,14 +476,14 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,

 	/* does this parameter fit into our parameter structure */
 	if (cde_ctx->num_params >= MAX_CDE_PARAMS) {
-		gk20a_warn(cde_ctx->dev, "cde: no room for new parameters param idx = %d",
+		nvgpu_warn(g, "cde: no room for new parameters param idx = %d",
 			   cde_ctx->num_params);
 		return -ENOMEM;
 	}

 	/* is the given id valid? */
 	if (param->id >= NUM_RESERVED_PARAMS + MAX_CDE_USER_PARAMS) {
-		gk20a_warn(cde_ctx->dev, "cde: parameter id is not valid. param idx = %d, id=%u, max=%u",
+		nvgpu_warn(g, "cde: parameter id is not valid. param idx = %d, id=%u, max=%u",
 			   param->id, cde_ctx->num_params,
 			   NUM_RESERVED_PARAMS + MAX_CDE_USER_PARAMS);
 		return -EINVAL;
@@ -494,6 +499,7 @@ static int gk20a_init_cde_required_class(struct gk20a_cde_ctx *cde_ctx,
 					 const struct firmware *img,
 					 u32 required_class)
 {
+	struct gk20a *g = cde_ctx->g;
 	struct nvgpu_alloc_obj_ctx_args alloc_obj_ctx;
 	int err;

@@ -505,7 +511,7 @@ static int gk20a_init_cde_required_class(struct gk20a_cde_ctx *cde_ctx,

 	err = gk20a_alloc_obj_ctx(cde_ctx->ch, &alloc_obj_ctx);
 	if (err) {
-		gk20a_warn(cde_ctx->dev, "cde: failed to allocate ctx. err=%d",
+		nvgpu_warn(g, "cde: failed to allocate ctx. err=%d",
 			   err);
 		return err;
 	}
@@ -519,6 +525,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
 				  struct gk20a_cde_cmd_elem *cmd_elem,
 				  u32 num_elems)
 {
+	struct gk20a *g = cde_ctx->g;
 	struct nvgpu_gpfifo **gpfifo, *gpfifo_elem;
 	u32 *num_entries;
 	unsigned int i;
@@ -531,7 +538,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
 		gpfifo = &cde_ctx->convert_cmd;
 		num_entries = &cde_ctx->convert_cmd_num_entries;
 	} else {
-		gk20a_warn(cde_ctx->dev, "cde: unknown command. op=%u",
+		nvgpu_warn(g, "cde: unknown command. op=%u",
 			   op);
 		return -EINVAL;
 	}
@@ -540,7 +547,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
 	*gpfifo = nvgpu_kzalloc(cde_ctx->g,
 				sizeof(struct nvgpu_gpfifo) * num_elems);
 	if (!*gpfifo) {
-		gk20a_warn(cde_ctx->dev, "cde: could not allocate memory for gpfifo entries");
+		nvgpu_warn(g, "cde: could not allocate memory for gpfifo entries");
 		return -ENOMEM;
 	}

@@ -550,7 +557,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,

 		/* validate the current entry */
 		if (cmd_elem->target_buf >= cde_ctx->num_bufs) {
-			gk20a_warn(cde_ctx->dev, "cde: target buffer is not available (target=%u, num_bufs=%u)",
+			nvgpu_warn(g, "cde: target buffer is not available (target=%u, num_bufs=%u)",
 				   cmd_elem->target_buf, cde_ctx->num_bufs);
 			return -EINVAL;
 		}
@@ -558,7 +565,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
 		target_mem = cde_ctx->mem + cmd_elem->target_buf;
 		if (target_mem->size<
 		    cmd_elem->target_byte_offset + cmd_elem->num_bytes) {
-			gk20a_warn(cde_ctx->dev, "cde: target buffer cannot hold all entries (target_size=%zu, target_byte_offset=%lld, num_bytes=%llu)",
+			nvgpu_warn(g, "cde: target buffer cannot hold all entries (target_size=%zu, target_byte_offset=%lld, num_bytes=%llu)",
 				   target_mem->size,
 				   cmd_elem->target_byte_offset,
 				   cmd_elem->num_bytes);
@@ -582,6 +589,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,

 static int gk20a_cde_pack_cmdbufs(struct gk20a_cde_ctx *cde_ctx)
 {
+	struct gk20a *g = cde_ctx->g;
 	unsigned long init_bytes = cde_ctx->init_cmd_num_entries *
 		sizeof(struct nvgpu_gpfifo);
 	unsigned long conv_bytes = cde_ctx->convert_cmd_num_entries *
@@ -592,8 +600,8 @@ static int gk20a_cde_pack_cmdbufs(struct gk20a_cde_ctx *cde_ctx)
 	/* allocate buffer that has space for both */
 	combined_cmd = nvgpu_kzalloc(cde_ctx->g, total_bytes);
 	if (!combined_cmd) {
-		gk20a_warn(cde_ctx->dev,
+		nvgpu_warn(g,
 			"cde: could not allocate memory for gpfifo entries");
 		return -ENOMEM;
 	}

@@ -615,6 +623,7 @@ static int gk20a_cde_pack_cmdbufs(struct gk20a_cde_ctx *cde_ctx)
 static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
 			      const struct firmware *img)
 {
+	struct gk20a *g = cde_ctx->g;
 	struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app;
 	u32 *data = (u32 *)img->data;
 	u32 num_of_elems;
@@ -625,7 +634,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,

 	min_size += 2 * sizeof(u32);
 	if (img->size < min_size) {
-		gk20a_warn(cde_ctx->dev, "cde: invalid image header");
+		nvgpu_warn(g, "cde: invalid image header");
 		return -EINVAL;
 	}

@@ -634,7 +643,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,

 	min_size += num_of_elems * sizeof(*elem);
 	if (img->size < min_size) {
-		gk20a_warn(cde_ctx->dev, "cde: bad image");
+		nvgpu_warn(g, "cde: bad image");
 		return -EINVAL;
 	}

@@ -671,7 +680,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
 			       MAX_CDE_ARRAY_ENTRIES*sizeof(u32));
 			break;
 		default:
-			gk20a_warn(cde_ctx->dev, "cde: unknown header element");
+			nvgpu_warn(g, "cde: unknown header element");
 			err = -EINVAL;
 		}

@@ -682,13 +691,13 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
 	}

 	if (!cde_ctx->init_convert_cmd || !cde_ctx->init_cmd_num_entries) {
-		gk20a_warn(cde_ctx->dev, "cde: convert command not defined");
+		nvgpu_warn(g, "cde: convert command not defined");
 		err = -EINVAL;
 		goto deinit_image;
 	}

 	if (!cde_ctx->convert_cmd || !cde_ctx->convert_cmd_num_entries) {
-		gk20a_warn(cde_ctx->dev, "cde: convert command not defined");
+		nvgpu_warn(g, "cde: convert command not defined");
 		err = -EINVAL;
 		goto deinit_image;
 	}
@@ -708,6 +717,7 @@ static int gk20a_cde_execute_buffer(struct gk20a_cde_ctx *cde_ctx,
 				    u32 op, struct nvgpu_fence *fence,
 				    u32 flags, struct gk20a_fence **fence_out)
 {
+	struct gk20a *g = cde_ctx->g;
 	struct nvgpu_gpfifo *gpfifo = NULL;
 	int num_entries = 0;

@@ -721,12 +731,12 @@ static int gk20a_cde_execute_buffer(struct gk20a_cde_ctx *cde_ctx,
 		gpfifo = cde_ctx->convert_cmd;
 		num_entries = cde_ctx->convert_cmd_num_entries;
 	} else {
-		gk20a_warn(cde_ctx->dev, "cde: unknown buffer");
+		nvgpu_warn(g, "cde: unknown buffer");
 		return -EINVAL;
 	}

 	if (gpfifo == NULL || num_entries == 0) {
-		gk20a_warn(cde_ctx->dev, "cde: buffer not available");
+		nvgpu_warn(g, "cde: buffer not available");
 		return -ENOSYS;
 	}

@@ -765,7 +775,6 @@ __releases(&cde_app->mutex)
 	struct gk20a_cde_ctx *cde_ctx = container_of(delay_work,
 			struct gk20a_cde_ctx, ctx_deleter_work);
 	struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app;
-	struct device *dev = cde_ctx->dev;
 	struct gk20a *g = cde_ctx->g;
 	int err;

@@ -780,7 +789,7 @@ __releases(&cde_app->mutex)
 	if (err) {
 		/* this context would find new use anyway later, so not freeing
 		 * here does not leak anything */
-		gk20a_warn(dev, "cde: cannot set gk20a on, postponing"
+		nvgpu_warn(g, "cde: cannot set gk20a on, postponing"
 			   " temp ctx deletion");
 		return;
 	}
@@ -848,7 +857,7 @@ __must_hold(&cde_app->mutex)

 	cde_ctx = gk20a_cde_allocate_context(g);
 	if (IS_ERR(cde_ctx)) {
-		gk20a_warn(g->dev, "cde: cannot allocate context: %ld",
+		nvgpu_warn(g, "cde: cannot allocate context: %ld",
 			   PTR_ERR(cde_ctx));
 		return cde_ctx;
 	}
@@ -1023,7 +1032,7 @@ __releases(&cde_app->mutex)

 	surface = dma_buf_vmap(compbits_scatter_buf);
 	if (IS_ERR(surface)) {
-		gk20a_warn(g->dev,
+		nvgpu_warn(g,
 			   "dma_buf_vmap failed");
 		err = -EINVAL;
 		goto exit_unmap_vaddr;
@@ -1035,7 +1044,7 @@ __releases(&cde_app->mutex)
 			surface, scatter_buffer);
 	sgt = gk20a_mm_pin(g->dev, compbits_scatter_buf);
 	if (IS_ERR(sgt)) {
-		gk20a_warn(g->dev,
+		nvgpu_warn(g,
 			   "mm_pin failed");
 		err = -EINVAL;
 		goto exit_unmap_surface;
@@ -1083,7 +1092,7 @@ __releases(&cde_app->mutex)
 		int id = param->id - NUM_RESERVED_PARAMS;

 		if (id < 0 || id >= MAX_CDE_USER_PARAMS) {
-			gk20a_warn(cde_ctx->dev, "cde: unknown user parameter");
+			nvgpu_warn(g, "cde: unknown user parameter");
 			err = -EINVAL;
 			goto exit_unmap_surface;
 		}
@@ -1093,7 +1102,7 @@ __releases(&cde_app->mutex)
 	/* patch data */
 	err = gk20a_cde_patch_params(cde_ctx);
 	if (err) {
-		gk20a_warn(cde_ctx->dev, "cde: failed to patch parameters");
+		nvgpu_warn(g, "cde: failed to patch parameters");
 		goto exit_unmap_surface;
 	}

@@ -1160,20 +1169,19 @@ __releases(&cde_app->mutex)

 	if (ch->has_timedout) {
 		if (cde_ctx->is_temporary) {
-			gk20a_warn(cde_ctx->dev,
+			nvgpu_warn(g,
 					"cde: channel had timed out"
 					" (temporary channel)");
 			/* going to be deleted anyway */
 		} else {
-			gk20a_warn(cde_ctx->dev,
+			nvgpu_warn(g,
 					"cde: channel had timed out"
 					", reloading");
 			/* mark it to be deleted, replace with a new one */
 			nvgpu_mutex_acquire(&cde_app->mutex);
 			cde_ctx->is_temporary = true;
 			if (gk20a_cde_create_context(g)) {
-				gk20a_err(cde_ctx->dev,
-					"cde: can't replace context");
+				nvgpu_err(g, "cde: can't replace context");
 			}
 			nvgpu_mutex_release(&cde_app->mutex);
 		}
@@ -1201,7 +1209,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)

 	img = nvgpu_request_firmware(g, "gpu2cde.bin", 0);
 	if (!img) {
-		dev_err(cde_ctx->dev, "cde: could not fetch the firmware");
+		nvgpu_err(g, "cde: could not fetch the firmware");
 		return -ENOSYS;
 	}

@@ -1210,7 +1218,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 					-1,
 					false);
 	if (!ch) {
-		gk20a_warn(cde_ctx->dev, "cde: gk20a channel not available");
+		nvgpu_warn(g, "cde: gk20a channel not available");
 		err = -ENOMEM;
 		goto err_get_gk20a_channel;
 	}
@@ -1218,14 +1226,14 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 	/* bind the channel to the vm */
 	err = __gk20a_vm_bind_channel(&g->mm.cde.vm, ch);
 	if (err) {
-		gk20a_warn(cde_ctx->dev, "cde: could not bind vm");
+		nvgpu_warn(g, "cde: could not bind vm");
 		goto err_commit_va;
 	}

 	/* allocate gpfifo (1024 should be more than enough) */
 	err = gk20a_channel_alloc_gpfifo(ch, 1024, 0, 0);
 	if (err) {
-		gk20a_warn(cde_ctx->dev, "cde: unable to allocate gpfifo");
+		nvgpu_warn(g, "cde: unable to allocate gpfifo");
 		goto err_alloc_gpfifo;
 	}

@@ -1238,7 +1246,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 					  gr->compbit_store.mem.aperture);

 	if (!vaddr) {
-		gk20a_warn(cde_ctx->dev, "cde: cannot map compression bit backing store");
+		nvgpu_warn(g, "cde: cannot map compression bit backing store");
 		err = -ENOMEM;
 		goto err_map_backingstore;
 	}
@@ -1251,7 +1259,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 	/* initialise the firmware */
 	err = gk20a_init_cde_img(cde_ctx, img);
 	if (err) {
-		gk20a_warn(cde_ctx->dev, "cde: image initialisation failed");
+		nvgpu_warn(g, "cde: image initialisation failed");
 		goto err_init_cde_img;
 	}

@@ -1268,8 +1276,7 @@ err_alloc_gpfifo:
 err_commit_va:
 err_get_gk20a_channel:
 	release_firmware(img);
-	dev_err(cde_ctx->dev, "cde: couldn't initialise buffer converter: %d",
-		err);
+	nvgpu_err(g, "cde: couldn't initialise buffer converter: %d", err);
 	return err;
 }

@@ -1413,17 +1420,17 @@ static int gk20a_buffer_convert_gpu_to_cde_v1(
 		g->ops.cde.get_program_numbers(g, block_height_log2,
 					       &hprog, &vprog);
 	else {
-		gk20a_warn(g->dev, "cde: chip not supported");
+		nvgpu_warn(g, "cde: chip not supported");
 		return -ENOSYS;
 	}

 	if (hprog < 0 || vprog < 0) {
-		gk20a_warn(g->dev, "cde: could not determine programs");
+		nvgpu_warn(g, "cde: could not determine programs");
 		return -ENOSYS;
 	}

 	if (xtiles > 8192 / 8 || ytiles > 8192 / 8)
-		gk20a_warn(g->dev, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)",
+		nvgpu_warn(g, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)",
 			   xtiles, ytiles);

 	gk20a_dbg(gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx",
@@ -1541,7 +1548,7 @@ static int gk20a_buffer_convert_gpu_to_cde(
 			width, height, block_height_log2,
 			submit_flags, fence_in, state);
 	} else {
-		dev_err(dev_from_gk20a(g), "unsupported CDE firmware version %d",
+		nvgpu_err(g, "unsupported CDE firmware version %d",
 			g->cde_app.firmware_version);
 		err = -EINVAL;
 	}
@@ -1628,13 +1635,13 @@ int gk20a_mark_compressible_write(struct gk20a *g, u32 buffer_fd,

 	dmabuf = dma_buf_get(buffer_fd);
 	if (IS_ERR(dmabuf)) {
-		dev_err(dev_from_gk20a(g), "invalid dmabuf");
+		nvgpu_err(g, "invalid dmabuf");
 		return -EINVAL;
 	}

 	err = gk20a_dmabuf_get_state(dmabuf, dev_from_gk20a(g), offset, &state);
 	if (err) {
-		dev_err(dev_from_gk20a(g), "could not get state from dmabuf");
+		nvgpu_err(g, "could not get state from dmabuf");
 		dma_buf_put(dmabuf);
 		return err;
 	}
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index f3ac28ea..c502add5 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -31,6 +31,8 @@
31#include "gk20a.h" 31#include "gk20a.h"
32#include "debug_gk20a.h" 32#include "debug_gk20a.h"
33 33
34#include <nvgpu/log.h>
35
34#include <nvgpu/hw/gk20a/hw_ce2_gk20a.h> 36#include <nvgpu/hw/gk20a/hw_ce2_gk20a.h>
35#include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h> 37#include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
36#include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h> 38#include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h>
@@ -459,7 +461,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 					    runlist_id,
 					    true);
 	if (!ce_ctx->ch) {
-		gk20a_err(ce_ctx->dev, "ce: gk20a channel not available");
+		nvgpu_err(g, "ce: gk20a channel not available");
 		goto end;
 	}
 	ce_ctx->ch->wdt_enabled = false;
@@ -467,21 +469,21 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 	/* bind the channel to the vm */
 	err = __gk20a_vm_bind_channel(&g->mm.ce.vm, ce_ctx->ch);
 	if (err) {
-		gk20a_err(ce_ctx->dev, "ce: could not bind vm");
+		nvgpu_err(g, "ce: could not bind vm");
 		goto end;
 	}

 	/* allocate gpfifo (1024 should be more than enough) */
 	err = gk20a_channel_alloc_gpfifo(ce_ctx->ch, 1024, 0, 0);
 	if (err) {
-		gk20a_err(ce_ctx->dev, "ce: unable to allocate gpfifo");
+		nvgpu_err(g, "ce: unable to allocate gpfifo");
 		goto end;
 	}

 	/* allocate command buffer (4096 should be more than enough) from sysmem*/
 	err = nvgpu_dma_alloc_map_sys(ce_ctx->vm, NVGPU_CE_COMMAND_BUF_SIZE, &ce_ctx->cmd_buf_mem);
 	if (err) {
-		gk20a_err(ce_ctx->dev,
+		nvgpu_err(g,
 			"ce: could not allocate command buffer for CE context");
 		goto end;
 	}
@@ -492,7 +494,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 	if (priority != -1) {
 		err = gk20a_fifo_set_priority(ce_ctx->ch, priority);
 		if (err) {
-			gk20a_err(ce_ctx->dev,
+			nvgpu_err(g,
 				"ce: could not set the channel priority for CE context");
 			goto end;
 		}
@@ -502,7 +504,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 	if (timeslice != -1) {
 		err = gk20a_fifo_set_timeslice(ce_ctx->ch, timeslice);
 		if (err) {
-			gk20a_err(ce_ctx->dev,
+			nvgpu_err(g,
 				"ce: could not set the channel timeslice value for CE context");
 			goto end;
 		}
@@ -512,7 +514,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 	if (runlist_level != -1) {
 		err = gk20a_channel_set_runlist_interleave(ce_ctx->ch, runlist_level);
 		if (err) {
-			gk20a_err(ce_ctx->dev,
+			nvgpu_err(g,
 				"ce: could not set the runlist interleave for CE context");
 			goto end;
 		}
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 94d193ed..c684be1f 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -27,6 +27,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/kmem.h>
 #include <nvgpu/dma.h>
+#include <nvgpu/log.h>

 #include "gk20a.h"
 #include "debug_gk20a.h"
@@ -301,7 +302,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
 	} while (!nvgpu_timeout_expired(&timeout));

 	if (!channel_idle) {
-		gk20a_err(dev_from_gk20a(ch->g), "jobs not freed for channel %d\n",
+		nvgpu_err(ch->g, "jobs not freed for channel %d\n",
 				ch->hw_chid);
 		return -EBUSY;
 	}
@@ -322,7 +323,7 @@ int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
 	int ret;

 	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		gk20a_err(dev_from_gk20a(g), "invalid operation for TSG!\n");
+		nvgpu_err(g, "invalid operation for TSG!\n");
 		return -EINVAL;
 	}

@@ -362,7 +363,7 @@ void gk20a_set_error_notifier_locked(struct channel_gk20a *ch, __u32 error)
 		ch->error_notifier->info32 = error;
 		ch->error_notifier->status = 0xffff;

-		gk20a_err(dev_from_gk20a(ch->g),
+		nvgpu_err(ch->g,
 			"error notifier set to %d for ch %d", error, ch->hw_chid);
 	}
 }
@@ -398,7 +399,7 @@ static void gk20a_wait_until_counter_is_N(
 				 msecs_to_jiffies(5000)) > 0)
 			break;

-		gk20a_warn(dev_from_gk20a(ch->g),
+		nvgpu_warn(ch->g,
 			   "%s: channel %d, still waiting, %s left: %d, waiting for: %d",
 			   caller, ch->hw_chid, counter_name,
 			   atomic_read(counter), wait_value);
@@ -476,7 +477,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	nvgpu_spinlock_acquire(&ch->ref_obtain_lock);
 	if (!ch->referenceable) {
 		nvgpu_spinlock_release(&ch->ref_obtain_lock);
-		gk20a_err(dev_from_gk20a(ch->g),
+		nvgpu_err(ch->g,
 			  "Extra %s() called to channel %u",
 			  __func__, ch->hw_chid);
 		return;
@@ -795,7 +796,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 	ch = allocate_channel(f);
 	if (ch == NULL) {
 		/* TBD: we want to make this virtualizable */
-		gk20a_err(dev_from_gk20a(g), "out of hw chids");
+		nvgpu_err(g, "out of hw chids");
 		return NULL;
 	}

@@ -813,7 +814,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 	if (g->ops.fifo.alloc_inst(g, ch)) {
 		ch->g = NULL;
 		free_channel(f, ch);
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"failed to open gk20a channel, out of inst mem");
 		return NULL;
 	}
@@ -873,7 +874,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
    used for inserting commands before/after user submitted buffers. */
 static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
 {
-	struct device *d = dev_from_gk20a(c->g);
+	struct gk20a *g = c->g;
 	struct vm_gk20a *ch_vm = c->vm;
 	struct priv_cmd_queue *q = &c->priv_cmd_q;
 	u32 size;
@@ -901,7 +902,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)

 	err = nvgpu_dma_alloc_map_sys(ch_vm, size, &q->mem);
 	if (err) {
-		gk20a_err(d, "%s: memory allocation failed\n", __func__);
+		nvgpu_err(g, "%s: memory allocation failed\n", __func__);
 		goto clean_up;
 	}

@@ -938,7 +939,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 	gk20a_dbg_fn("size %d", orig_size);

 	if (!e) {
-		gk20a_err(dev_from_gk20a(c->g),
+		nvgpu_err(c->g,
 			"ch %d: priv cmd entry is null",
 			c->hw_chid);
 		return -EINVAL;
@@ -1016,7 +1017,7 @@ static int channel_gk20a_alloc_job(struct channel_gk20a *c,
 		if (CIRC_SPACE(put, get, c->joblist.pre_alloc.length))
 			*job_out = &c->joblist.pre_alloc.jobs[put];
 		else {
-			gk20a_warn(dev_from_gk20a(c->g),
+			nvgpu_warn(c->g,
 					"out of job ringbuffer space\n");
 			err = -EAGAIN;
 		}
@@ -1231,7 +1232,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,

 	/* an address space needs to have been bound at this point. */
 	if (!gk20a_channel_as_bound(c)) {
-		gk20a_err(d,
+		nvgpu_err(g,
 			    "not bound to an address space at time of gpfifo"
 			    " allocation.");
 		return -EINVAL;
@@ -1239,7 +1240,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 	ch_vm = c->vm;

 	if (c->gpfifo.mem.size) {
-		gk20a_err(d, "channel %d :"
+		nvgpu_err(g, "channel %d :"
 			   "gpfifo already allocated", c->hw_chid);
 		return -EEXIST;
 	}
@@ -1248,7 +1249,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 			gpfifo_size * sizeof(struct nvgpu_gpfifo),
 			&c->gpfifo.mem);
 	if (err) {
-		gk20a_err(d, "%s: memory allocation failed\n", __func__);
+		nvgpu_err(g, "%s: memory allocation failed\n", __func__);
 		goto clean_up;
 	}

@@ -1334,7 +1335,7 @@ clean_up_unmap:
 	nvgpu_dma_unmap_free(ch_vm, &c->gpfifo.mem);
 clean_up:
 	memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
-	gk20a_err(d, "fail");
+	nvgpu_err(g, "fail");
 	return err;
 }

@@ -1607,7 +1608,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
 		return;
 	}

-	gk20a_err(dev_from_gk20a(g), "Job on channel %d timed out",
+	nvgpu_err(g, "Job on channel %d timed out",
 		  ch->hw_chid);

 	gk20a_debug_dump(g->dev);
@@ -1761,7 +1762,7 @@ static void gk20a_channel_worker_process(struct gk20a *g, int *get)
 		 * other reasons than a channel added in the items list
 		 * currently, so warn and ack the message.
 		 */
-		gk20a_warn(g->dev, "Spurious worker event!");
+		nvgpu_warn(g, "Spurious worker event!");
 		++*get;
 		break;
 	}
@@ -1820,7 +1821,7 @@ int nvgpu_channel_worker_init(struct gk20a *g)
 	task = kthread_run(gk20a_channel_poll_worker, g,
 			"nvgpu_channel_poll_%s", g->name);
 	if (IS_ERR(task)) {
-		gk20a_err(g->dev, "failed to start channel poller thread");
+		nvgpu_err(g, "failed to start channel poller thread");
 		return PTR_ERR(task);
 	}
 	g->channel_worker.poll_task = task;
@@ -1853,7 +1854,7 @@ void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 	 * one ref already, so can't fail.
 	 */
 	if (WARN_ON(!gk20a_channel_get(ch))) {
-		gk20a_warn(g->dev, "cannot get ch ref for worker!");
+		nvgpu_warn(g, "cannot get ch ref for worker!");
 		return;
 	}

@@ -1876,7 +1877,7 @@ void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
 {
 	struct priv_cmd_queue *q = &c->priv_cmd_q;
-	struct device *d = dev_from_gk20a(c->g);
+	struct gk20a *g = c->g;

 	if (!e)
 		return 0;
@@ -1885,7 +1886,7 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
 		/* read the entry's valid flag before reading its contents */
 		rmb();
 		if ((q->get != e->off) && e->off != 0)
-			gk20a_err(d, "requests out-of-order, ch=%d\n",
+			nvgpu_err(g, "requests out-of-order, ch=%d\n",
 				c->hw_chid);
 		q->get = e->off + e->size;
 	}
@@ -2416,7 +2417,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 	 * So, add extra_entries in user request. Also, HW with fifo size N
 	 * can accept only N-1 entreis and so the below condition */
 	if (c->gpfifo.entry_num - 1 < num_entries + extra_entries) {
-		gk20a_err(d, "not enough gpfifo space allocated");
+		nvgpu_err(g, "not enough gpfifo space allocated");
 		return -ENOMEM;
 	}

@@ -2430,7 +2431,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,

 	/* an address space needs to have been bound at this point. */
 	if (!gk20a_channel_as_bound(c)) {
-		gk20a_err(d,
+		nvgpu_err(g,
 			    "not bound to an address space at time of gpfifo"
 			    " submission.");
 		return -EINVAL;
@@ -2512,7 +2513,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 	/* released by job cleanup via syncpt or sema interrupt */
 	err = gk20a_busy(g);
 	if (err) {
-		gk20a_err(d, "failed to host gk20a to submit gpfifo, process %s",
+		nvgpu_err(g, "failed to host gk20a to submit gpfifo, process %s",
 			  current->comm);
 		return err;
 	}
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index fc5862e1..fbeb1e4a 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -20,6 +20,7 @@

 #include <nvgpu/semaphore.h>
 #include <nvgpu/kmem.h>
+#include <nvgpu/log.h>

 #include "channel_sync_gk20a.h"
 #include "gk20a.h"
@@ -65,8 +66,7 @@ static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
 	int err = 0;

 	if (!nvhost_syncpt_is_valid_pt_ext(sp->host1x_pdev, id)) {
-		dev_warn(dev_from_gk20a(c->g),
-			 "invalid wait id in gpfifo submit, elided");
+		nvgpu_warn(c->g, "invalid wait id in gpfifo submit, elided");
 		return 0;
 	}

@@ -75,7 +75,7 @@ static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,

 	err = gk20a_channel_alloc_priv_cmdbuf(c, 4, wait_cmd);
 	if (err) {
-		gk20a_err(dev_from_gk20a(c->g),
+		nvgpu_err(c->g,
 			"not enough priv cmd buffer space");
 		return err;
 	}
@@ -131,7 +131,7 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,

 	err = gk20a_channel_alloc_priv_cmdbuf(c, 4 * num_wait_cmds, wait_cmd);
 	if (err) {
-		gk20a_err(dev_from_gk20a(c->g),
+		nvgpu_err(c->g,
 			"not enough priv cmd buffer space");
 		sync_fence_put(sync_fence);
 		return err;
@@ -360,7 +360,7 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c)
 						c->hw_chid, syncpt_name);
 	if (!sp->id) {
 		nvgpu_kfree(c->g, sp);
-		gk20a_err(c->g->dev, "failed to get free syncpt");
+		nvgpu_err(c->g, "failed to get free syncpt");
 		return NULL;
 	}

@@ -501,7 +501,7 @@ static void gk20a_channel_semaphore_launcher(
 		   fence, fence->name);
 	err = sync_fence_wait(fence, -1);
 	if (err < 0)
-		dev_err(g->dev, "error waiting pre-fence: %d\n", err);
+		nvgpu_err(g, "error waiting pre-fence: %d\n", err);

 	gk20a_dbg_info(
 		  "wait completed (%d) for fence %p '%s', triggering gpu work",
@@ -594,8 +594,8 @@ static int gk20a_channel_semaphore_wait_syncpt(
 {
 	struct gk20a_channel_semaphore *sema =
 		container_of(s, struct gk20a_channel_semaphore, ops);
-	struct device *dev = dev_from_gk20a(sema->c->g);
-	gk20a_err(dev, "trying to use syncpoint synchronization");
+	struct gk20a *g = sema->c->g;
+	nvgpu_err(g, "trying to use syncpoint synchronization");
 	return -ENODEV;
 }

@@ -707,7 +707,7 @@ static int gk20a_channel_semaphore_wait_fd(

 	err = gk20a_channel_alloc_priv_cmdbuf(c, 8, wait_cmd);
 	if (err) {
-		gk20a_err(dev_from_gk20a(c->g),
+		nvgpu_err(c->g,
 			"not enough priv cmd buffer space");
 		goto clean_up_sync_fence;
 	}
@@ -724,7 +724,7 @@ static int gk20a_channel_semaphore_wait_fd(
 	w->ch = c;
 	w->sema = nvgpu_semaphore_alloc(c);
 	if (!w->sema) {
-		gk20a_err(dev_from_gk20a(c->g), "ran out of semaphores");
+		nvgpu_err(c->g, "ran out of semaphores");
 		err = -ENOMEM;
 		goto clean_up_worker;
 	}
@@ -779,7 +779,7 @@ clean_up_sync_fence:
 	sync_fence_put(sync_fence);
 	return err;
 #else
-	gk20a_err(dev_from_gk20a(c->g),
+	nvgpu_err(c->g,
 		  "trying to use sync fds with CONFIG_SYNC disabled");
 	return -ENODEV;
 #endif
@@ -801,7 +801,7 @@ static int __gk20a_channel_semaphore_incr(

 	semaphore = nvgpu_semaphore_alloc(c);
 	if (!semaphore) {
-		gk20a_err(dev_from_gk20a(c->g),
+		nvgpu_err(c->g,
 				"ran out of semaphores");
 		return -ENOMEM;
 	}
@@ -809,7 +809,7 @@ static int __gk20a_channel_semaphore_incr(
 	incr_cmd_size = 10;
 	err = gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, incr_cmd);
 	if (err) {
-		gk20a_err(dev_from_gk20a(c->g),
+		nvgpu_err(c->g,
 			"not enough priv cmd buffer space");
 		goto clean_up_sema;
 	}
@@ -889,7 +889,7 @@ static int gk20a_channel_semaphore_incr_user(
 #else
 	struct gk20a_channel_semaphore *sema =
 		container_of(s, struct gk20a_channel_semaphore, ops);
-	gk20a_err(dev_from_gk20a(sema->c->g),
+	nvgpu_err(sema->c->g,
 		  "trying to use sync fds with CONFIG_SYNC disabled");
 	return -ENODEV;
 #endif
diff --git a/drivers/gpu/nvgpu/gk20a/clk_gk20a.c b/drivers/gpu/nvgpu/gk20a/clk_gk20a.c
index 38d4b555..443cd5e1 100644
--- a/drivers/gpu/nvgpu/gk20a/clk_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/clk_gk20a.c
@@ -24,6 +24,8 @@

 #include "gk20a.h"

+#include <nvgpu/log.h>
+
 #include <nvgpu/hw/gk20a/hw_trim_gk20a.h>
 #include <nvgpu/hw/gk20a/hw_timer_gk20a.h>

@@ -251,7 +253,7 @@ static int clk_slide_gpc_pll(struct gk20a *g, u32 n)
 		gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());

 	if (ramp_timeout <= 0) {
-		gk20a_err(dev_from_gk20a(g), "gpcpll dynamic ramp timeout");
+		nvgpu_err(g, "gpcpll dynamic ramp timeout");
 		return -ETIMEDOUT;
 	}
 	return 0;
@@ -439,7 +441,7 @@ static int gk20a_init_clk_setup_sw(struct gk20a *g)

 	ref = clk_get_parent(clk_get_parent(clk->tegra_clk));
 	if (IS_ERR(ref)) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"failed to get GPCPLL reference clock");
 		err = -EINVAL;
 		goto fail;
@@ -449,7 +451,7 @@ static int gk20a_init_clk_setup_sw(struct gk20a *g)
 	clk->gpc_pll.id = GK20A_GPC_PLL;
 	clk->gpc_pll.clk_in = ref_rate / KHZ;
 	if (clk->gpc_pll.clk_in == 0) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"GPCPLL reference clock is zero");
 		err = -EINVAL;
 		goto fail;
@@ -508,7 +510,7 @@ static int set_pll_target(struct gk20a *g, u32 freq, u32 old_freq)
 		/* gpc_pll.freq is changed to new value here */
 		if (clk_config_pll(clk, &clk->gpc_pll, &gpc_pll_params,
 				   &freq, true)) {
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"failed to set pll target for %d", freq);
 			return -EINVAL;
 		}
@@ -536,8 +538,7 @@ static int set_pll_freq(struct gk20a *g, u32 freq, u32 old_freq)
 	/* Just report error but not restore PLL since dvfs could already change
 	   voltage even when it returns error. */
 	if (err)
-		gk20a_err(dev_from_gk20a(g),
-			"failed to set pll to %d", freq);
+		nvgpu_err(g, "failed to set pll to %d", freq);
 	return err;
 }

diff --git a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
index e5910e7f..76237e03 100644
--- a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
@@ -27,6 +27,8 @@
27#include "gk20a.h" 27#include "gk20a.h"
28#include "css_gr_gk20a.h" 28#include "css_gr_gk20a.h"
29 29
30#include <nvgpu/log.h>
31
30#include <nvgpu/hw/gk20a/hw_perf_gk20a.h> 32#include <nvgpu/hw/gk20a/hw_perf_gk20a.h>
31#include <nvgpu/hw/gk20a/hw_mc_gk20a.h> 33#include <nvgpu/hw/gk20a/hw_mc_gk20a.h>
32 34
@@ -299,8 +301,7 @@ static int css_gr_flush_snapshots(struct channel_gk20a *ch)
 			cur->snapshot->hw_overflow_events_occured++;
 		}

-		gk20a_warn(dev_from_gk20a(g),
-			   "cyclestats: hardware overflow detected\n");
+		nvgpu_warn(g, "cyclestats: hardware overflow detected");
 	}

 	/* process all items in HW buffer */
@@ -340,8 +341,7 @@ static int css_gr_flush_snapshots(struct channel_gk20a *ch)
340 dst_nxt = dst_head; 341 dst_nxt = dst_head;
341 } else { 342 } else {
342 /* client not found - skipping this entry */ 343 /* client not found - skipping this entry */
343 gk20a_warn(dev_from_gk20a(g), 344 nvgpu_warn(g, "cyclestats: orphaned perfmon %u",
344 "cyclestats: orphaned perfmon %u\n",
345 src->perfmon_id); 345 src->perfmon_id);
346 goto next_hw_fifo_entry; 346 goto next_hw_fifo_entry;
347 } 347 }
@@ -351,8 +351,7 @@ static int css_gr_flush_snapshots(struct channel_gk20a *ch)
351 if (dst_nxt == dst_get) { 351 if (dst_nxt == dst_get) {
352 /* no data copy, no pointer updates */ 352 /* no data copy, no pointer updates */
353 dst->sw_overflow_events_occured++; 353 dst->sw_overflow_events_occured++;
354 gk20a_warn(dev_from_gk20a(g), 354 nvgpu_warn(g, "cyclestats: perfmon %u soft overflow",
355 "cyclestats: perfmon %u soft overflow\n",
356 src->perfmon_id); 355 src->perfmon_id);
357 } else { 356 } else {
358 *dst_put = *src; 357 *dst_put = *src;
@@ -392,8 +391,7 @@ next_hw_fifo_entry:
392 /* not all entries proceed correctly. some of problems */ 391 /* not all entries proceed correctly. some of problems */
393 /* reported as overflows, some as orphaned perfmons, */ 392 /* reported as overflows, some as orphaned perfmons, */
394 /* but it will be better notify with summary about it */ 393 /* but it will be better notify with summary about it */
395 gk20a_warn(dev_from_gk20a(g), 394 nvgpu_warn(g, "cyclestats: completed %u from %u entries",
396 "cyclestats: completed %u from %u entries\n",
397 completed, pending); 395 completed, pending);
398 } 396 }
399 397
diff --git a/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
index cc008844..b33845d1 100644
--- a/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
@@ -32,6 +32,8 @@
32#include "gk20a.h" 32#include "gk20a.h"
33#include "gr_gk20a.h" 33#include "gr_gk20a.h"
34 34
35#include <nvgpu/log.h>
36
35#include <nvgpu/hw/gk20a/hw_ctxsw_prog_gk20a.h> 37#include <nvgpu/hw/gk20a/hw_ctxsw_prog_gk20a.h>
36#include <nvgpu/hw/gk20a/hw_gr_gk20a.h> 38#include <nvgpu/hw/gk20a/hw_gr_gk20a.h>
37 39
@@ -601,7 +603,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g,
601 603
602 write_idx = hdr->write_idx; 604 write_idx = hdr->write_idx;
603 if (write_idx >= dev->num_ents) { 605 if (write_idx >= dev->num_ents) {
604 gk20a_err(dev_from_gk20a(dev->g), 606 nvgpu_err(dev->g,
605 "write_idx=%u out of range [0..%u]", 607 "write_idx=%u out of range [0..%u]",
606 write_idx, dev->num_ents); 608 write_idx, dev->num_ents);
607 ret = -ENOSPC; 609 ret = -ENOSPC;
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index d7f8ceba..bc3f67c4 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -25,6 +25,7 @@
25#include <uapi/linux/nvgpu.h> 25#include <uapi/linux/nvgpu.h>
26 26
27#include <nvgpu/kmem.h> 27#include <nvgpu/kmem.h>
28#include <nvgpu/log.h>
28 29
29#include "gk20a.h" 30#include "gk20a.h"
30#include "gr_gk20a.h" 31#include "gr_gk20a.h"
@@ -229,7 +230,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
229 230
230 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); 231 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
231 if (!ch) { 232 if (!ch) {
232 gk20a_err(dev_from_gk20a(dbg_s->g), 233 nvgpu_err(dbg_s->g,
233 "no channel bound to dbg session\n"); 234 "no channel bound to dbg session\n");
234 return -EINVAL; 235 return -EINVAL;
235 } 236 }
@@ -248,7 +249,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
248 break; 249 break;
249 250
250 default: 251 default:
251 gk20a_err(dev_from_gk20a(dbg_s->g), 252 nvgpu_err(dbg_s->g,
252 "unrecognized dbg gpu events ctrl cmd: 0x%x", 253 "unrecognized dbg gpu events ctrl cmd: 0x%x",
253 args->cmd); 254 args->cmd);
254 ret = -EINVAL; 255 ret = -EINVAL;
@@ -402,7 +403,7 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
402 break; 403 break;
403 404
404 default: 405 default:
405 gk20a_err(dev_from_gk20a(g), 406 nvgpu_err(g,
406 "unrecognized dbg gpu timeout mode : 0x%x", 407 "unrecognized dbg gpu timeout mode : 0x%x",
407 timeout_mode); 408 timeout_mode);
408 err = -EINVAL; 409 err = -EINVAL;
@@ -742,7 +743,7 @@ static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(
742 write_size); 743 write_size);
743 nvgpu_mutex_release(&g->dbg_sessions_lock); 744 nvgpu_mutex_release(&g->dbg_sessions_lock);
744 if (err) { 745 if (err) {
745 gk20a_err(dev_from_gk20a(g), "copy_to_user failed!\n"); 746 nvgpu_err(g, "copy_to_user failed!\n");
746 return err; 747 return err;
747 } 748 }
748 749
@@ -1099,7 +1100,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
1099 break; 1100 break;
1100 1101
1101 default: 1102 default:
1102 gk20a_err(dev_from_gk20a(g), 1103 nvgpu_err(g,
1103 "unrecognized dbg gpu ioctl cmd: 0x%x", 1104 "unrecognized dbg gpu ioctl cmd: 0x%x",
1104 cmd); 1105 cmd);
1105 err = -ENOTTY; 1106 err = -ENOTTY;
@@ -1146,14 +1147,13 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
1146 int err = 0, powergate_err = 0; 1147 int err = 0, powergate_err = 0;
1147 bool is_pg_disabled = false; 1148 bool is_pg_disabled = false;
1148 1149
1149 struct device *dev = dbg_s->dev;
1150 struct gk20a *g = dbg_s->g; 1150 struct gk20a *g = dbg_s->g;
1151 struct channel_gk20a *ch; 1151 struct channel_gk20a *ch;
1152 1152
1153 gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops); 1153 gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops);
1154 1154
1155 if (args->num_ops > g->gpu_characteristics.reg_ops_limit) { 1155 if (args->num_ops > g->gpu_characteristics.reg_ops_limit) {
1156 gk20a_err(dev, "regops limit exceeded"); 1156 nvgpu_err(g, "regops limit exceeded");
1157 return -EINVAL; 1157 return -EINVAL;
1158 } 1158 }
1159 1159
@@ -1163,25 +1163,25 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
1163 } 1163 }
1164 1164
1165 if (g->dbg_regops_tmp_buf_ops == 0 || !g->dbg_regops_tmp_buf) { 1165 if (g->dbg_regops_tmp_buf_ops == 0 || !g->dbg_regops_tmp_buf) {
1166 gk20a_err(dev, "reg ops work buffer not allocated"); 1166 nvgpu_err(g, "reg ops work buffer not allocated");
1167 return -ENODEV; 1167 return -ENODEV;
1168 } 1168 }
1169 1169
1170 if (!dbg_s->id) { 1170 if (!dbg_s->id) {
1171 gk20a_err(dev, "can't call reg_ops on an unbound debugger session"); 1171 nvgpu_err(g, "can't call reg_ops on an unbound debugger session");
1172 return -EINVAL; 1172 return -EINVAL;
1173 } 1173 }
1174 1174
1175 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); 1175 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1176 if (!dbg_s->is_profiler && !ch) { 1176 if (!dbg_s->is_profiler && !ch) {
1177 gk20a_err(dev, "bind a channel before regops for a debugging session"); 1177 nvgpu_err(g, "bind a channel before regops for a debugging session");
1178 return -EINVAL; 1178 return -EINVAL;
1179 } 1179 }
1180 1180
1181 /* be sure that ctx info is in place */ 1181 /* be sure that ctx info is in place */
1182 if (!gk20a_gpu_is_virtual(dbg_s->dev) && 1182 if (!gk20a_gpu_is_virtual(dbg_s->dev) &&
1183 !gr_context_info_available(dbg_s, &g->gr)) { 1183 !gr_context_info_available(dbg_s, &g->gr)) {
1184 gk20a_err(dev, "gr context data not available\n"); 1184 nvgpu_err(g, "gr context data not available\n");
1185 return -ENODEV; 1185 return -ENODEV;
1186 } 1186 }
1187 1187
@@ -1221,7 +1221,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
1221 1221
1222 if (copy_from_user(g->dbg_regops_tmp_buf, 1222 if (copy_from_user(g->dbg_regops_tmp_buf,
1223 fragment, fragment_size)) { 1223 fragment, fragment_size)) {
1224 dev_err(dev, "copy_from_user failed!"); 1224 nvgpu_err(g, "copy_from_user failed!");
1225 err = -EFAULT; 1225 err = -EFAULT;
1226 break; 1226 break;
1227 } 1227 }
@@ -1233,7 +1233,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
1233 1233
1234 if (copy_to_user(fragment, g->dbg_regops_tmp_buf, 1234 if (copy_to_user(fragment, g->dbg_regops_tmp_buf,
1235 fragment_size)) { 1235 fragment_size)) {
1236 dev_err(dev, "copy_to_user failed!"); 1236 nvgpu_err(g, "copy_to_user failed!");
1237 err = -EFAULT; 1237 err = -EFAULT;
1238 break; 1238 break;
1239 } 1239 }
@@ -1255,7 +1255,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
1255 err = powergate_err; 1255 err = powergate_err;
1256 1256
1257 if (err) 1257 if (err)
1258 gk20a_err(dev, "dbg regops failed"); 1258 nvgpu_err(g, "dbg regops failed");
1259 1259
1260 return err; 1260 return err;
1261} 1261}
@@ -1350,7 +1350,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
1350 break; 1350 break;
1351 1351
1352 default: 1352 default:
1353 gk20a_err(dev_from_gk20a(g), 1353 nvgpu_err(g,
1354 "unrecognized dbg gpu powergate mode: 0x%x", 1354 "unrecognized dbg gpu powergate mode: 0x%x",
1355 powermode); 1355 powermode);
1356 err = -ENOTTY; 1356 err = -ENOTTY;
@@ -1388,7 +1388,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
1388 1388
1389 err = gk20a_busy(g); 1389 err = gk20a_busy(g);
1390 if (err) { 1390 if (err) {
1391 gk20a_err(dev_from_gk20a(g), "failed to poweron"); 1391 nvgpu_err(g, "failed to poweron");
1392 return err; 1392 return err;
1393 } 1393 }
1394 1394
@@ -1397,7 +1397,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
1397 1397
1398 ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s); 1398 ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1399 if (!ch_gk20a) { 1399 if (!ch_gk20a) {
1400 gk20a_err(dev_from_gk20a(g), 1400 nvgpu_err(g,
1401 "no bound channel for smpc ctxsw mode update\n"); 1401 "no bound channel for smpc ctxsw mode update\n");
1402 err = -EINVAL; 1402 err = -EINVAL;
1403 goto clean_up; 1403 goto clean_up;
@@ -1406,7 +1406,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
1406 err = g->ops.gr.update_smpc_ctxsw_mode(g, ch_gk20a, 1406 err = g->ops.gr.update_smpc_ctxsw_mode(g, ch_gk20a,
1407 args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW); 1407 args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
1408 if (err) { 1408 if (err) {
1409 gk20a_err(dev_from_gk20a(g), 1409 nvgpu_err(g,
1410 "error (%d) during smpc ctxsw mode update\n", err); 1410 "error (%d) during smpc ctxsw mode update\n", err);
1411 goto clean_up; 1411 goto clean_up;
1412 } 1412 }
@@ -1434,13 +1434,13 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
1434 * cleaned up. 1434 * cleaned up.
1435 */ 1435 */
1436 if (!dbg_s->has_profiler_reservation) { 1436 if (!dbg_s->has_profiler_reservation) {
1437 gk20a_err(dev_from_gk20a(g), 1437 nvgpu_err(g,
1438 "session doesn't have a valid reservation"); 1438 "session doesn't have a valid reservation");
1439 } 1439 }
1440 1440
1441 err = gk20a_busy(g); 1441 err = gk20a_busy(g);
1442 if (err) { 1442 if (err) {
1443 gk20a_err(dev_from_gk20a(g), "failed to poweron"); 1443 nvgpu_err(g, "failed to poweron");
1444 return err; 1444 return err;
1445 } 1445 }
1446 1446
@@ -1449,7 +1449,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
1449 1449
1450 ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s); 1450 ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1451 if (!ch_gk20a) { 1451 if (!ch_gk20a) {
1452 gk20a_err(dev_from_gk20a(g), 1452 nvgpu_err(g,
1453 "no bound channel for pm ctxsw mode update\n"); 1453 "no bound channel for pm ctxsw mode update\n");
1454 err = -EINVAL; 1454 err = -EINVAL;
1455 goto clean_up; 1455 goto clean_up;
@@ -1458,7 +1458,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
1458 err = g->ops.gr.update_hwpm_ctxsw_mode(g, ch_gk20a, 1458 err = g->ops.gr.update_hwpm_ctxsw_mode(g, ch_gk20a,
1459 args->mode == NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW); 1459 args->mode == NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW);
1460 if (err) 1460 if (err)
1461 gk20a_err(dev_from_gk20a(g), 1461 nvgpu_err(g,
1462 "error (%d) during pm ctxsw mode update\n", err); 1462 "error (%d) during pm ctxsw mode update\n", err);
1463 1463
1464 /* gk20a would require a WAR to set the core PM_ENABLE bit, not 1464 /* gk20a would require a WAR to set the core PM_ENABLE bit, not
@@ -1486,7 +1486,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
1486 1486
1487 err = gk20a_busy(g); 1487 err = gk20a_busy(g);
1488 if (err) { 1488 if (err) {
1489 gk20a_err(dev_from_gk20a(g), "failed to poweron"); 1489 nvgpu_err(g, "failed to poweron");
1490 return err; 1490 return err;
1491 } 1491 }
1492 1492
@@ -1495,7 +1495,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
1495 /* Suspend GPU context switching */ 1495 /* Suspend GPU context switching */
1496 err = gr_gk20a_disable_ctxsw(g); 1496 err = gr_gk20a_disable_ctxsw(g);
1497 if (err) { 1497 if (err) {
1498 gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw"); 1498 nvgpu_err(g, "unable to stop gr ctxsw");
1499 /* this should probably be ctx-fatal... */ 1499 /* this should probably be ctx-fatal... */
1500 goto clean_up; 1500 goto clean_up;
1501 } 1501 }
@@ -1512,7 +1512,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
1512 1512
1513 err = gr_gk20a_enable_ctxsw(g); 1513 err = gr_gk20a_enable_ctxsw(g);
1514 if (err) 1514 if (err)
1515 gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n"); 1515 nvgpu_err(g, "unable to restart ctxsw!\n");
1516 1516
1517clean_up: 1517clean_up:
1518 nvgpu_mutex_release(&g->dbg_sessions_lock); 1518 nvgpu_mutex_release(&g->dbg_sessions_lock);
@@ -1544,7 +1544,7 @@ static int nvgpu_ioctl_allocate_profiler_object(
1544 else { 1544 else {
1545 prof_obj->ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); 1545 prof_obj->ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1546 if (prof_obj->ch == NULL) { 1546 if (prof_obj->ch == NULL) {
1547 gk20a_err(dev_from_gk20a(g), 1547 nvgpu_err(g,
1548 "bind a channel for dbg session"); 1548 "bind a channel for dbg session");
1549 nvgpu_kfree(g, prof_obj); 1549 nvgpu_kfree(g, prof_obj);
1550 err = -EINVAL; 1550 err = -EINVAL;
@@ -1582,7 +1582,7 @@ static int nvgpu_ioctl_free_profiler_object(
1582 dbg_profiler_object_data, prof_obj_entry) { 1582 dbg_profiler_object_data, prof_obj_entry) {
1583 if (prof_obj->prof_handle == args->profiler_handle) { 1583 if (prof_obj->prof_handle == args->profiler_handle) {
1584 if (prof_obj->session_id != dbg_s->id) { 1584 if (prof_obj->session_id != dbg_s->id) {
1585 gk20a_err(dev_from_gk20a(g), 1585 nvgpu_err(g,
1586 "invalid handle %x", 1586 "invalid handle %x",
1587 args->profiler_handle); 1587 args->profiler_handle);
1588 err = -EINVAL; 1588 err = -EINVAL;
@@ -1598,7 +1598,7 @@ static int nvgpu_ioctl_free_profiler_object(
1598 } 1598 }
1599 } 1599 }
1600 if (!obj_found) { 1600 if (!obj_found) {
1601 gk20a_err(dev_from_gk20a(g), "profiler %x not found", 1601 nvgpu_err(g, "profiler %x not found",
1602 args->profiler_handle); 1602 args->profiler_handle);
1603 err = -EINVAL; 1603 err = -EINVAL;
1604 } 1604 }
@@ -1618,7 +1618,7 @@ static struct dbg_profiler_object_data *find_matching_prof_obj(
1618 dbg_profiler_object_data, prof_obj_entry) { 1618 dbg_profiler_object_data, prof_obj_entry) {
1619 if (prof_obj->prof_handle == profiler_handle) { 1619 if (prof_obj->prof_handle == profiler_handle) {
1620 if (prof_obj->session_id != dbg_s->id) { 1620 if (prof_obj->session_id != dbg_s->id) {
1621 gk20a_err(dev_from_gk20a(g), 1621 nvgpu_err(g,
1622 "invalid handle %x", 1622 "invalid handle %x",
1623 profiler_handle); 1623 profiler_handle);
1624 return NULL; 1624 return NULL;
@@ -1667,7 +1667,7 @@ static void nvgpu_release_profiler_reservation(struct dbg_session_gk20a *dbg_s,
1667 1667
1668 g->profiler_reservation_count--; 1668 g->profiler_reservation_count--;
1669 if (g->profiler_reservation_count < 0) 1669 if (g->profiler_reservation_count < 0)
1670 gk20a_err(dev_from_gk20a(g), "Negative reservation count!"); 1670 nvgpu_err(g, "Negative reservation count!");
1671 dbg_s->has_profiler_reservation = false; 1671 dbg_s->has_profiler_reservation = false;
1672 prof_obj->has_reservation = false; 1672 prof_obj->has_reservation = false;
1673 if (prof_obj->ch == NULL) 1673 if (prof_obj->ch == NULL)
@@ -1684,7 +1684,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
1684 gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle); 1684 gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle);
1685 1685
1686 if (g->profiler_reservation_count < 0) { 1686 if (g->profiler_reservation_count < 0) {
1687 gk20a_err(dev_from_gk20a(g), "Negative reservation count!"); 1687 nvgpu_err(g, "Negative reservation count!");
1688 return -EINVAL; 1688 return -EINVAL;
1689 } 1689 }
1690 1690
@@ -1694,7 +1694,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
1694 my_prof_obj = find_matching_prof_obj(dbg_s, profiler_handle); 1694 my_prof_obj = find_matching_prof_obj(dbg_s, profiler_handle);
1695 1695
1696 if (!my_prof_obj) { 1696 if (!my_prof_obj) {
1697 gk20a_err(dev_from_gk20a(g), "object not found"); 1697 nvgpu_err(g, "object not found");
1698 err = -EINVAL; 1698 err = -EINVAL;
1699 goto exit; 1699 goto exit;
1700 } 1700 }
@@ -1711,7 +1711,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
1711 */ 1711 */
1712 if (!g->ops.dbg_session_ops.check_and_set_global_reservation( 1712 if (!g->ops.dbg_session_ops.check_and_set_global_reservation(
1713 dbg_s, my_prof_obj)) { 1713 dbg_s, my_prof_obj)) {
1714 gk20a_err(dev_from_gk20a(g), 1714 nvgpu_err(g,
1715 "global reserve: have existing reservation"); 1715 "global reserve: have existing reservation");
1716 err = -EBUSY; 1716 err = -EBUSY;
1717 } 1717 }
@@ -1719,7 +1719,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
1719 /* If there's a global reservation, 1719 /* If there's a global reservation,
1720 * we can't take a per-context one. 1720 * we can't take a per-context one.
1721 */ 1721 */
1722 gk20a_err(dev_from_gk20a(g), 1722 nvgpu_err(g,
1723 "per-ctxt reserve: global reservation in effect"); 1723 "per-ctxt reserve: global reservation in effect");
1724 err = -EBUSY; 1724 err = -EBUSY;
1725 } else if (gk20a_is_channel_marked_as_tsg(my_prof_obj->ch)) { 1725 } else if (gk20a_is_channel_marked_as_tsg(my_prof_obj->ch)) {
@@ -1732,7 +1732,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
1732 dbg_profiler_object_data, prof_obj_entry) { 1732 dbg_profiler_object_data, prof_obj_entry) {
1733 if (prof_obj->has_reservation && 1733 if (prof_obj->has_reservation &&
1734 (prof_obj->ch->tsgid == my_tsgid)) { 1734 (prof_obj->ch->tsgid == my_tsgid)) {
1735 gk20a_err(dev_from_gk20a(g), 1735 nvgpu_err(g,
1736 "per-ctxt reserve (tsg): already reserved"); 1736 "per-ctxt reserve (tsg): already reserved");
1737 err = -EBUSY; 1737 err = -EBUSY;
1738 goto exit; 1738 goto exit;
@@ -1742,7 +1742,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
1742 if (!g->ops.dbg_session_ops.check_and_set_context_reservation( 1742 if (!g->ops.dbg_session_ops.check_and_set_context_reservation(
1743 dbg_s, my_prof_obj)) { 1743 dbg_s, my_prof_obj)) {
1744 /* Another guest OS has the global reservation */ 1744 /* Another guest OS has the global reservation */
1745 gk20a_err(dev_from_gk20a(g), 1745 nvgpu_err(g,
1746 "per-ctxt reserve: global reservation in effect"); 1746 "per-ctxt reserve: global reservation in effect");
1747 err = -EBUSY; 1747 err = -EBUSY;
1748 } 1748 }
@@ -1756,7 +1756,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
1756 dbg_profiler_object_data, prof_obj_entry) { 1756 dbg_profiler_object_data, prof_obj_entry) {
1757 if (prof_obj->has_reservation && 1757 if (prof_obj->has_reservation &&
1758 (prof_obj->ch == my_ch)) { 1758 (prof_obj->ch == my_ch)) {
1759 gk20a_err(dev_from_gk20a(g), 1759 nvgpu_err(g,
1760 "per-ctxt reserve (ch): already reserved"); 1760 "per-ctxt reserve (ch): already reserved");
1761 err = -EBUSY; 1761 err = -EBUSY;
1762 goto exit; 1762 goto exit;
@@ -1766,7 +1766,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
1766 if (!g->ops.dbg_session_ops.check_and_set_context_reservation( 1766 if (!g->ops.dbg_session_ops.check_and_set_context_reservation(
1767 dbg_s, my_prof_obj)) { 1767 dbg_s, my_prof_obj)) {
1768 /* Another guest OS has the global reservation */ 1768 /* Another guest OS has the global reservation */
1769 gk20a_err(dev_from_gk20a(g), 1769 nvgpu_err(g,
1770 "per-ctxt reserve: global reservation in effect"); 1770 "per-ctxt reserve: global reservation in effect");
1771 err = -EBUSY; 1771 err = -EBUSY;
1772 } 1772 }
@@ -1791,7 +1791,7 @@ static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
1791 prof_obj = find_matching_prof_obj(dbg_s, profiler_handle); 1791 prof_obj = find_matching_prof_obj(dbg_s, profiler_handle);
1792 1792
1793 if (!prof_obj) { 1793 if (!prof_obj) {
1794 gk20a_err(dev_from_gk20a(g), "object not found"); 1794 nvgpu_err(g, "object not found");
1795 err = -EINVAL; 1795 err = -EINVAL;
1796 goto exit; 1796 goto exit;
1797 } 1797 }
@@ -1799,7 +1799,7 @@ static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
1799 if (prof_obj->has_reservation) 1799 if (prof_obj->has_reservation)
1800 g->ops.dbg_session_ops.release_profiler_reservation(dbg_s, prof_obj); 1800 g->ops.dbg_session_ops.release_profiler_reservation(dbg_s, prof_obj);
1801 else { 1801 else {
1802 gk20a_err(dev_from_gk20a(g), "No reservation found"); 1802 nvgpu_err(g, "No reservation found");
1803 err = -EINVAL; 1803 err = -EINVAL;
1804 goto exit; 1804 goto exit;
1805 } 1805 }
@@ -1854,7 +1854,7 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
1854 1854
1855 err = gk20a_busy(g); 1855 err = gk20a_busy(g);
1856 if (err) { 1856 if (err) {
1857 gk20a_err(dev_from_gk20a(g), "failed to poweron"); 1857 nvgpu_err(g, "failed to poweron");
1858 goto fail_unmap; 1858 goto fail_unmap;
1859 } 1859 }
1860 1860
@@ -1895,7 +1895,7 @@ static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
1895 1895
1896 err = gk20a_busy(g); 1896 err = gk20a_busy(g);
1897 if (err) { 1897 if (err) {
1898 gk20a_err(dev_from_gk20a(g), "failed to poweron"); 1898 nvgpu_err(g, "failed to poweron");
1899 return err; 1899 return err;
1900 } 1900 }
1901 1901
diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
index 5724be72..85b24f2e 100644
--- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
@@ -19,6 +19,7 @@
19#include <nvgpu/log.h> 19#include <nvgpu/log.h>
20#include <nvgpu/kmem.h> 20#include <nvgpu/kmem.h>
21#include <nvgpu/semaphore.h> 21#include <nvgpu/semaphore.h>
22#include <nvgpu/log.h>
22 23
23#include "gk20a.h" 24#include "gk20a.h"
24#include "debug_gk20a.h" 25#include "debug_gk20a.h"
@@ -145,7 +146,7 @@ static int gk20a_gr_debug_show(struct seq_file *s, void *unused)
145 146
146 err = gk20a_busy(g); 147 err = gk20a_busy(g);
147 if (err) { 148 if (err) {
148 gk20a_err(dev, "failed to power on gpu: %d", err); 149 nvgpu_err(g, "failed to power on gpu: %d", err);
149 return -EINVAL; 150 return -EINVAL;
150 } 151 }
151 152
@@ -186,7 +187,7 @@ static int gk20a_debug_show(struct seq_file *s, void *unused)
186 187
187 err = gk20a_busy(g); 188 err = gk20a_busy(g);
188 if (err) { 189 if (err) {
189 gk20a_err(g->dev, "failed to power on gpu: %d", err); 190 nvgpu_err(g, "failed to power on gpu: %d", err);
190 return -EFAULT; 191 return -EFAULT;
191 } 192 }
192 193
diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
index 96b94ea7..ad228a8c 100644
--- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
@@ -31,6 +31,8 @@
31#include "gk20a.h" 31#include "gk20a.h"
32#include "gr_gk20a.h" 32#include "gr_gk20a.h"
33 33
34#include <nvgpu/log.h>
35
34#include <nvgpu/hw/gk20a/hw_ctxsw_prog_gk20a.h> 36#include <nvgpu/hw/gk20a/hw_ctxsw_prog_gk20a.h>
35#include <nvgpu/hw/gk20a/hw_gr_gk20a.h> 37#include <nvgpu/hw/gk20a/hw_gr_gk20a.h>
36 38
@@ -156,7 +158,7 @@ static int gk20a_fecs_trace_hash_add(struct gk20a *g, u32 context_ptr, pid_t pid
156 158
157 he = nvgpu_kzalloc(g, sizeof(*he)); 159 he = nvgpu_kzalloc(g, sizeof(*he));
158 if (unlikely(!he)) { 160 if (unlikely(!he)) {
159 gk20a_warn(dev_from_gk20a(g), 161 nvgpu_warn(g,
160 "can't alloc new hash entry for context_ptr=%x pid=%d", 162 "can't alloc new hash entry for context_ptr=%x pid=%d",
161 context_ptr, pid); 163 context_ptr, pid);
162 return -ENOMEM; 164 return -ENOMEM;
@@ -255,7 +257,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
255 "consuming record trace=%p read=%d record=%p", trace, index, r); 257 "consuming record trace=%p read=%d record=%p", trace, index, r);
256 258
257 if (unlikely(!gk20a_fecs_trace_is_valid_record(r))) { 259 if (unlikely(!gk20a_fecs_trace_is_valid_record(r))) {
258 gk20a_warn(dev_from_gk20a(g), 260 nvgpu_warn(g,
259 "trace=%p read=%d record=%p magic_lo=%08x magic_hi=%08x (invalid)", 261 "trace=%p read=%d record=%p magic_lo=%08x magic_hi=%08x (invalid)",
260 trace, index, r, r->magic_lo, r->magic_hi); 262 trace, index, r, r->magic_lo, r->magic_hi);
261 return -EINVAL; 263 return -EINVAL;
@@ -342,7 +344,7 @@ static int gk20a_fecs_trace_poll(struct gk20a *g)
342 nvgpu_mutex_acquire(&trace->poll_lock); 344 nvgpu_mutex_acquire(&trace->poll_lock);
343 write = gk20a_fecs_trace_get_write_index(g); 345 write = gk20a_fecs_trace_get_write_index(g);
344 if (unlikely((write < 0) || (write >= GK20A_FECS_TRACE_NUM_RECORDS))) { 346 if (unlikely((write < 0) || (write >= GK20A_FECS_TRACE_NUM_RECORDS))) {
345 gk20a_err(dev_from_gk20a(g), 347 nvgpu_err(g,
346 "failed to acquire write index, write=%d", write); 348 "failed to acquire write index, write=%d", write);
347 err = write; 349 err = write;
348 goto done; 350 goto done;
@@ -571,7 +573,7 @@ static int gk20a_fecs_trace_init(struct gk20a *g)
571 573
572 trace = nvgpu_kzalloc(g, sizeof(struct gk20a_fecs_trace)); 574 trace = nvgpu_kzalloc(g, sizeof(struct gk20a_fecs_trace));
573 if (!trace) { 575 if (!trace) {
574 gk20a_warn(dev_from_gk20a(g), "failed to allocate fecs_trace"); 576 nvgpu_warn(g, "failed to allocate fecs_trace");
575 return -ENOMEM; 577 return -ENOMEM;
576 } 578 }
577 g->fecs_trace = trace; 579 g->fecs_trace = trace;
@@ -586,7 +588,7 @@ static int gk20a_fecs_trace_init(struct gk20a *g)
586 BUG_ON(!is_power_of_2(GK20A_FECS_TRACE_NUM_RECORDS)); 588 BUG_ON(!is_power_of_2(GK20A_FECS_TRACE_NUM_RECORDS));
587 err = gk20a_fecs_trace_alloc_ring(g); 589 err = gk20a_fecs_trace_alloc_ring(g);
588 if (err) { 590 if (err) {
589 gk20a_warn(dev_from_gk20a(g), "failed to allocate FECS ring"); 591 nvgpu_warn(g, "failed to allocate FECS ring");
590 goto clean_hash_lock; 592 goto clean_hash_lock;
591 } 593 }
592 594
@@ -754,7 +756,7 @@ static int gk20a_fecs_trace_enable(struct gk20a *g)
754 756
755 task = kthread_run(gk20a_fecs_trace_periodic_polling, g, __func__); 757 task = kthread_run(gk20a_fecs_trace_periodic_polling, g, __func__);
756 if (unlikely(IS_ERR(task))) { 758 if (unlikely(IS_ERR(task))) {
757 gk20a_warn(dev_from_gk20a(g), 759 nvgpu_warn(g,
758 "failed to create FECS polling task"); 760 "failed to create FECS polling task");
759 return PTR_ERR(task); 761 return PTR_ERR(task);
760 } 762 }
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index ca09c22a..48253e59 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -30,6 +30,7 @@
30#include <nvgpu/timers.h> 30#include <nvgpu/timers.h>
31#include <nvgpu/semaphore.h> 31#include <nvgpu/semaphore.h>
32#include <nvgpu/kmem.h> 32#include <nvgpu/kmem.h>
33#include <nvgpu/log.h>
33 34
34#include "gk20a.h" 35#include "gk20a.h"
35#include "debug_gk20a.h" 36#include "debug_gk20a.h"
@@ -105,7 +106,7 @@ struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g, u32 e
105 } 106 }
106 107
107 if (!info) 108 if (!info)
108 gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id); 109 nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
109 110
110 return info; 111 return info;
111} 112}
@@ -131,7 +132,7 @@ bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id)
131 } 132 }
132 133
133 if (!valid) 134 if (!valid)
134 gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id); 135 nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
135 136
136 return valid; 137 return valid;
137} 138}
@@ -146,7 +147,7 @@ u32 gk20a_fifo_get_gr_engine_id(struct gk20a *g)
146 1, ENGINE_GR_GK20A); 147 1, ENGINE_GR_GK20A);
147 148
148 if (!gr_engine_cnt) { 149 if (!gr_engine_cnt) {
149 gk20a_err(dev_from_gk20a(g), "No GR engine available on this device!\n"); 150 nvgpu_err(g, "No GR engine available on this device!\n");
150 } 151 }
151 152
152 return gr_engine_id; 153 return gr_engine_id;
@@ -218,7 +219,7 @@ u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g)
218 1, ENGINE_GR_GK20A); 219 1, ENGINE_GR_GK20A);
219 220
220 if (!gr_engine_cnt) { 221 if (!gr_engine_cnt) {
221 gk20a_err(dev_from_gk20a(g), 222 nvgpu_err(g,
222 "No GR engine available on this device!"); 223 "No GR engine available on this device!");
223 goto end; 224 goto end;
224 } 225 }
@@ -228,7 +229,7 @@ u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g)
228 if (engine_info) { 229 if (engine_info) {
229 gr_runlist_id = engine_info->runlist_id; 230 gr_runlist_id = engine_info->runlist_id;
230 } else { 231 } else {
231 gk20a_err(g->dev, 232 nvgpu_err(g,
232 "gr_engine_id is not in active list/invalid %d", gr_engine_id); 233 "gr_engine_id is not in active list/invalid %d", gr_engine_id);
233 } 234 }
234 235
@@ -273,7 +274,7 @@ static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
273 if (engine_info) { 274 if (engine_info) {
274 fault_id = engine_info->fault_id; 275 fault_id = engine_info->fault_id;
275 } else { 276 } else {
276 gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id); 277 nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
277 } 278 }
278 return fault_id; 279 return fault_id;
279} 280}
@@ -321,7 +322,6 @@ int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
321int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) 322int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
322{ 323{
323 struct gk20a *g = f->g; 324 struct gk20a *g = f->g;
324 struct device *d = dev_from_gk20a(g);
325 u32 i; 325 u32 i;
326 u32 max_info_entries = top_device_info__size_1_v(); 326 u32 max_info_entries = top_device_info__size_1_v();
327 u32 engine_enum = ENGINE_INVAL_GK20A; 327 u32 engine_enum = ENGINE_INVAL_GK20A;
@@ -375,7 +375,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
375 } 375 }
376 376
377 if (!found_pbdma_for_runlist) { 377 if (!found_pbdma_for_runlist) {
378 gk20a_err(d, "busted pbdma map"); 378 nvgpu_err(g, "busted pbdma map");
379 return -EINVAL; 379 return -EINVAL;
380 } 380 }
381 } 381 }
@@ -647,7 +647,6 @@ static void fifo_engine_exception_status(struct gk20a *g,
647static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) 647static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
648{ 648{
649 struct fifo_runlist_info_gk20a *runlist; 649 struct fifo_runlist_info_gk20a *runlist;
650 struct device *d = dev_from_gk20a(g);
651 unsigned int runlist_id; 650 unsigned int runlist_id;
652 u32 i; 651 u32 i;
653 size_t runlist_size; 652 size_t runlist_size;
@@ -689,7 +688,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
689 int err = nvgpu_dma_alloc_sys(g, runlist_size, 688 int err = nvgpu_dma_alloc_sys(g, runlist_size,
690 &runlist->mem[i]); 689 &runlist->mem[i]);
691 if (err) { 690 if (err) {
692 dev_err(d, "memory allocation failed\n"); 691 nvgpu_err(g, "memory allocation failed\n");
693 goto clean_up_runlist; 692 goto clean_up_runlist;
694 } 693 }
695 } 694 }
@@ -888,7 +887,6 @@ static void gk20a_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f)
888static int gk20a_init_fifo_setup_sw(struct gk20a *g) 887static int gk20a_init_fifo_setup_sw(struct gk20a *g)
889{ 888{
890 struct fifo_gk20a *f = &g->fifo; 889 struct fifo_gk20a *f = &g->fifo;
891 struct device *d = dev_from_gk20a(g);
892 unsigned int chid, i; 890 unsigned int chid, i;
893 int err = 0; 891 int err = 0;
894 892
@@ -948,7 +946,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
948 err = nvgpu_dma_alloc_sys(g, f->userd_entry_size * 946 err = nvgpu_dma_alloc_sys(g, f->userd_entry_size *
949 f->num_channels, &f->userd); 947 f->num_channels, &f->userd);
950 if (err) { 948 if (err) {
951 dev_err(d, "userd memory allocation failed\n"); 949 nvgpu_err(g, "userd memory allocation failed\n");
952 goto clean_up; 950 goto clean_up;
953 } 951 }
954 gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va); 952 gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va);
@@ -1032,7 +1030,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
1032 smp_mb(); 1030 smp_mb();
1033 1031
1034 if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) { 1032 if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
1035 gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a: CPU wrote 0x%x, \ 1033 nvgpu_err(g, "bar1 broken @ gk20a: CPU wrote 0x%x, \
1036 GPU read 0x%x", *cpu_vaddr, gk20a_bar1_readl(g, bar1_vaddr)); 1034 GPU read 0x%x", *cpu_vaddr, gk20a_bar1_readl(g, bar1_vaddr));
1037 return -EINVAL; 1035 return -EINVAL;
1038 } 1036 }
@@ -1040,14 +1038,14 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g)
1040 gk20a_bar1_writel(g, bar1_vaddr, v2); 1038 gk20a_bar1_writel(g, bar1_vaddr, v2);
1041 1039
1042 if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) { 1040 if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
1043 gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a: GPU wrote 0x%x, \ 1041 nvgpu_err(g, "bar1 broken @ gk20a: GPU wrote 0x%x, \
1044 CPU read 0x%x", gk20a_bar1_readl(g, bar1_vaddr), *cpu_vaddr); 1042 CPU read 0x%x", gk20a_bar1_readl(g, bar1_vaddr), *cpu_vaddr);
1045 return -EINVAL; 1043 return -EINVAL;
1046 } 1044 }
1047 1045
1048 /* is it visible to the cpu? */ 1046 /* is it visible to the cpu? */
1049 if (*cpu_vaddr != v2) { 1047 if (*cpu_vaddr != v2) {
1050 gk20a_err(dev_from_gk20a(g), 1048 nvgpu_err(g,
1051 "cpu didn't see bar1 write @ %p!", 1049 "cpu didn't see bar1 write @ %p!",
1052 cpu_vaddr); 1050 cpu_vaddr);
1053 } 1051 }
@@ -1230,7 +1228,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
1230 } 1228 }
1231 1229
1232 if (engine_enum == ENGINE_INVAL_GK20A) 1230 if (engine_enum == ENGINE_INVAL_GK20A)
1233 gk20a_err(dev_from_gk20a(g), "unsupported engine_id %d", engine_id); 1231 nvgpu_err(g, "unsupported engine_id %d", engine_id);
1234 1232
1235 if (engine_enum == ENGINE_GR_GK20A) { 1233 if (engine_enum == ENGINE_GR_GK20A) {
1236 if (support_gk20a_pmu(g->dev) && g->elpg_enabled) 1234 if (support_gk20a_pmu(g->dev) && g->elpg_enabled)
@@ -1242,7 +1240,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
1242 g->ops.fecs_trace.reset(g); 1240 g->ops.fecs_trace.reset(g);
1243 /*HALT_PIPELINE method, halt GR engine*/ 1241 /*HALT_PIPELINE method, halt GR engine*/
1244 if (gr_gk20a_halt_pipe(g)) 1242 if (gr_gk20a_halt_pipe(g))
1245 gk20a_err(dev_from_gk20a(g), "failed to HALT gr pipe"); 1243 nvgpu_err(g, "failed to HALT gr pipe");
1246 /* resetting engine using mc_enable_r() is not 1244 /* resetting engine using mc_enable_r() is not
1247 enough, we do full init sequence */ 1245 enough, we do full init sequence */
1248 gk20a_gr_reset(g); 1246 gk20a_gr_reset(g);
@@ -1260,16 +1258,15 @@ static void gk20a_fifo_handle_chsw_fault(struct gk20a *g)
1260 u32 intr; 1258 u32 intr;
1261 1259
1262 intr = gk20a_readl(g, fifo_intr_chsw_error_r()); 1260 intr = gk20a_readl(g, fifo_intr_chsw_error_r());
1263 gk20a_err(dev_from_gk20a(g), "chsw: %08x\n", intr); 1261 nvgpu_err(g, "chsw: %08x\n", intr);
1264 gk20a_fecs_dump_falcon_stats(g); 1262 gk20a_fecs_dump_falcon_stats(g);
1265 gk20a_writel(g, fifo_intr_chsw_error_r(), intr); 1263 gk20a_writel(g, fifo_intr_chsw_error_r(), intr);
1266} 1264}
1267 1265
1268static void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g) 1266static void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g)
1269{ 1267{
1270 struct device *dev = dev_from_gk20a(g);
1271 u32 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r()); 1268 u32 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r());
1272 gk20a_err(dev, "dropped mmu fault (0x%08x)", fault_id); 1269 nvgpu_err(g, "dropped mmu fault (0x%08x)", fault_id);
1273} 1270}
1274 1271
1275bool gk20a_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid) 1272bool gk20a_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid)
@@ -1381,7 +1378,7 @@ bool gk20a_fifo_error_tsg(struct gk20a *g,
1381void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g, 1378void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
1382 struct channel_gk20a *refch) 1379 struct channel_gk20a *refch)
1383{ 1380{
1384 gk20a_err(dev_from_gk20a(g), 1381 nvgpu_err(g,
1385 "channel %d generated a mmu fault", refch->hw_chid); 1382 "channel %d generated a mmu fault", refch->hw_chid);
1386 gk20a_set_error_notifier(refch, 1383 gk20a_set_error_notifier(refch,
1387 NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT); 1384 NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
@@ -1392,7 +1389,7 @@ void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
1392{ 1389{
1393 struct channel_gk20a *ch = NULL; 1390 struct channel_gk20a *ch = NULL;
1394 1391
1395 gk20a_err(dev_from_gk20a(g), 1392 nvgpu_err(g,
1396 "TSG %d generated a mmu fault", tsg->tsgid); 1393 "TSG %d generated a mmu fault", tsg->tsgid);
1397 1394
1398 down_read(&tsg->ch_list_lock); 1395 down_read(&tsg->ch_list_lock);
@@ -1544,7 +1541,7 @@ static bool gk20a_fifo_handle_mmu_fault(
1544 f.engine_subid_desc, 1541 f.engine_subid_desc,
1545 f.client_desc, 1542 f.client_desc,
1546 f.fault_type_desc); 1543 f.fault_type_desc);
1547 gk20a_err(dev_from_gk20a(g), "%s mmu fault on engine %d, " 1544 nvgpu_err(g, "%s mmu fault on engine %d, "
1548 "engine subid %d (%s), client %d (%s), " 1545 "engine subid %d (%s), client %d (%s), "
1549 "addr 0x%08x:0x%08x, type %d (%s), info 0x%08x," 1546 "addr 0x%08x:0x%08x, type %d (%s), info 0x%08x,"
1550 "inst_ptr 0x%llx\n", 1547 "inst_ptr 0x%llx\n",
@@ -1558,7 +1555,7 @@ static bool gk20a_fifo_handle_mmu_fault(
1558 1555
1559 if (ctxsw) { 1556 if (ctxsw) {
1560 gk20a_fecs_dump_falcon_stats(g); 1557 gk20a_fecs_dump_falcon_stats(g);
1561 gk20a_err(dev_from_gk20a(g), "gr_status_r : 0x%x", 1558 nvgpu_err(g, "gr_status_r : 0x%x",
1562 gk20a_readl(g, gr_status_r())); 1559 gk20a_readl(g, gr_status_r()));
1563 } 1560 }
1564 1561
@@ -1654,18 +1651,18 @@ static bool gk20a_fifo_handle_mmu_fault(
1654 gk20a_channel_abort(ch, false); 1651 gk20a_channel_abort(ch, false);
1655 gk20a_channel_put(ch); 1652 gk20a_channel_put(ch);
1656 } else { 1653 } else {
1657 gk20a_err(dev_from_gk20a(g), 1654 nvgpu_err(g,
1658 "mmu error in freed channel %d", 1655 "mmu error in freed channel %d",
1659 ch->hw_chid); 1656 ch->hw_chid);
1660 } 1657 }
1661 } else if (f.inst_ptr == 1658 } else if (f.inst_ptr ==
1662 gk20a_mm_inst_block_addr(g, &g->mm.bar1.inst_block)) { 1659 gk20a_mm_inst_block_addr(g, &g->mm.bar1.inst_block)) {
1663 gk20a_err(dev_from_gk20a(g), "mmu fault from bar1"); 1660 nvgpu_err(g, "mmu fault from bar1");
1664 } else if (f.inst_ptr == 1661 } else if (f.inst_ptr ==
1665 gk20a_mm_inst_block_addr(g, &g->mm.pmu.inst_block)) { 1662 gk20a_mm_inst_block_addr(g, &g->mm.pmu.inst_block)) {
1666 gk20a_err(dev_from_gk20a(g), "mmu fault from pmu"); 1663 nvgpu_err(g, "mmu fault from pmu");
1667 } else 1664 } else
1668 gk20a_err(dev_from_gk20a(g), "couldn't locate channel for mmu fault"); 1665 nvgpu_err(g, "couldn't locate channel for mmu fault");
1669 } 1666 }
1670 1667
1671 /* clear interrupt */ 1668 /* clear interrupt */
@@ -2137,7 +2134,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
2137 2134
2138 /* could not find the engine - should never happen */ 2135 /* could not find the engine - should never happen */
2139 if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) { 2136 if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) {
2140 gk20a_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, failed to find engine\n", 2137 nvgpu_err(g, "fifo sched error : 0x%08x, failed to find engine\n",
2141 sched_error); 2138 sched_error);
2142 ret = false; 2139 ret = false;
2143 goto err; 2140 goto err;
@@ -2158,7 +2155,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
2158 } 2155 }
2159 2156
2160 if (ret) { 2157 if (ret) {
2161 gk20a_err(dev_from_gk20a(g), 2158 nvgpu_err(g,
2162 "fifo sched ctxsw timeout error: " 2159 "fifo sched ctxsw timeout error: "
2163 "engine=%u, %s=%d, ms=%u", 2160 "engine=%u, %s=%d, ms=%u",
2164 engine_id, is_tsg ? "tsg" : "ch", id, ms); 2161 engine_id, is_tsg ? "tsg" : "ch", id, ms);
@@ -2175,7 +2172,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
2175 "%s=%d", ms, is_tsg ? "tsg" : "ch", id); 2172 "%s=%d", ms, is_tsg ? "tsg" : "ch", id);
2176 } 2173 }
2177 } else { 2174 } else {
2178 gk20a_err(dev_from_gk20a(g), 2175 nvgpu_err(g,
2179 "fifo sched error : 0x%08x, engine=%u, %s=%d", 2176 "fifo sched error : 0x%08x, engine=%u, %s=%d",
2180 sched_error, engine_id, is_tsg ? "tsg" : "ch", id); 2177 sched_error, engine_id, is_tsg ? "tsg" : "ch", id);
2181 } 2178 }
@@ -2187,7 +2184,6 @@ err:
2187static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) 2184static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
2188{ 2185{
2189 bool print_channel_reset_log = false; 2186 bool print_channel_reset_log = false;
2190 struct device *dev = dev_from_gk20a(g);
2191 u32 handled = 0; 2187 u32 handled = 0;
2192 2188
2193 gk20a_dbg_fn("fifo_intr=0x%08x", fifo_intr); 2189 gk20a_dbg_fn("fifo_intr=0x%08x", fifo_intr);
@@ -2195,13 +2191,13 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
2195 if (fifo_intr & fifo_intr_0_pio_error_pending_f()) { 2191 if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
2196 /* pio mode is unused. this shouldn't happen, ever. */ 2192 /* pio mode is unused. this shouldn't happen, ever. */
2197 /* should we clear it or just leave it pending? */ 2193 /* should we clear it or just leave it pending? */
2198 gk20a_err(dev, "fifo pio error!\n"); 2194 nvgpu_err(g, "fifo pio error!\n");
2199 BUG_ON(1); 2195 BUG_ON(1);
2200 } 2196 }
2201 2197
2202 if (fifo_intr & fifo_intr_0_bind_error_pending_f()) { 2198 if (fifo_intr & fifo_intr_0_bind_error_pending_f()) {
2203 u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r()); 2199 u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r());
2204 gk20a_err(dev, "fifo bind error: 0x%08x", bind_error); 2200 nvgpu_err(g, "fifo bind error: 0x%08x", bind_error);
2205 print_channel_reset_log = true; 2201 print_channel_reset_log = true;
2206 handled |= fifo_intr_0_bind_error_pending_f(); 2202 handled |= fifo_intr_0_bind_error_pending_f();
2207 } 2203 }
@@ -2233,7 +2229,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
2233 2229
2234 if (print_channel_reset_log) { 2230 if (print_channel_reset_log) {
2235 unsigned int engine_id; 2231 unsigned int engine_id;
2236 gk20a_err(dev_from_gk20a(g), 2232 nvgpu_err(g,
2237 "channel reset initiated from %s; intr=0x%08x", 2233 "channel reset initiated from %s; intr=0x%08x",
2238 __func__, fifo_intr); 2234 __func__, fifo_intr);
2239 for (engine_id = 0; 2235 for (engine_id = 0;
@@ -2301,8 +2297,7 @@ static bool gk20a_fifo_is_sw_method_subch(struct gk20a *g, int pbdma_id,
2301 return false; 2297 return false;
2302} 2298}
2303 2299
2304static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev, 2300static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g,
2305 struct gk20a *g,
2306 struct fifo_gk20a *f, 2301 struct fifo_gk20a *f,
2307 u32 pbdma_id) 2302 u32 pbdma_id)
2308{ 2303{
@@ -2323,7 +2318,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
2323 if ((f->intr.pbdma.device_fatal_0 | 2318 if ((f->intr.pbdma.device_fatal_0 |
2324 f->intr.pbdma.channel_fatal_0 | 2319 f->intr.pbdma.channel_fatal_0 |
2325 f->intr.pbdma.restartable_0) & pbdma_intr_0) { 2320 f->intr.pbdma.restartable_0) & pbdma_intr_0) {
2326 gk20a_err(dev_from_gk20a(g), 2321 nvgpu_err(g,
2327 "pbdma_intr_0(%d):0x%08x PBH: %08x SHADOW: %08x M0: %08x %08x %08x %08x", 2322 "pbdma_intr_0(%d):0x%08x PBH: %08x SHADOW: %08x M0: %08x %08x %08x %08x",
2328 pbdma_id, pbdma_intr_0, 2323 pbdma_id, pbdma_intr_0,
2329 gk20a_readl(g, pbdma_pb_header_r(pbdma_id)), 2324 gk20a_readl(g, pbdma_pb_header_r(pbdma_id)),
@@ -2346,7 +2341,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
2346 gk20a_writel(g, pbdma_acquire_r(pbdma_id), val); 2341 gk20a_writel(g, pbdma_acquire_r(pbdma_id), val);
2347 if (g->timeouts_enabled) { 2342 if (g->timeouts_enabled) {
2348 reset = true; 2343 reset = true;
2349 gk20a_err(dev_from_gk20a(g), 2344 nvgpu_err(g,
2350 "semaphore acquire timeout!"); 2345 "semaphore acquire timeout!");
2351 } 2346 }
2352 handled |= pbdma_intr_0_acquire_pending_f(); 2347 handled |= pbdma_intr_0_acquire_pending_f();
@@ -2387,7 +2382,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
2387 /* all intrs in _intr_1 are "host copy engine" related, 2382 /* all intrs in _intr_1 are "host copy engine" related,
2388 * which gk20a doesn't have. for now just make them channel fatal. */ 2383 * which gk20a doesn't have. for now just make them channel fatal. */
2389 if (pbdma_intr_1) { 2384 if (pbdma_intr_1) {
2390 dev_err(dev, "channel hce error: pbdma_intr_1(%d): 0x%08x", 2385 nvgpu_err(g, "channel hce error: pbdma_intr_1(%d): 0x%08x",
2391 pbdma_id, pbdma_intr_1); 2386 pbdma_id, pbdma_intr_1);
2392 reset = true; 2387 reset = true;
2393 gk20a_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1); 2388 gk20a_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1);
@@ -2428,7 +2423,6 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
2428 2423
2429static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr) 2424static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
2430{ 2425{
2431 struct device *dev = dev_from_gk20a(g);
2432 struct fifo_gk20a *f = &g->fifo; 2426 struct fifo_gk20a *f = &g->fifo;
2433 u32 clear_intr = 0, i; 2427 u32 clear_intr = 0, i;
2434 u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); 2428 u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
@@ -2438,7 +2432,7 @@ static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
2438 if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) { 2432 if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) {
2439 gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i); 2433 gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i);
2440 clear_intr |= 2434 clear_intr |=
2441 gk20a_fifo_handle_pbdma_intr(dev, g, f, i); 2435 gk20a_fifo_handle_pbdma_intr(g, f, i);
2442 } 2436 }
2443 } 2437 }
2444 return fifo_intr_0_pbdma_intr_pending_f(); 2438 return fifo_intr_0_pbdma_intr_pending_f();
@@ -2534,7 +2528,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
2534 struct tsg_gk20a *tsg = &g->fifo.tsg[id]; 2528 struct tsg_gk20a *tsg = &g->fifo.tsg[id];
2535 struct channel_gk20a *ch = NULL; 2529 struct channel_gk20a *ch = NULL;
2536 2530
2537 gk20a_err(dev_from_gk20a(g), 2531 nvgpu_err(g,
2538 "preempt TSG %d timeout\n", id); 2532 "preempt TSG %d timeout\n", id);
2539 2533
2540 down_read(&tsg->ch_list_lock); 2534 down_read(&tsg->ch_list_lock);
@@ -2550,7 +2544,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
2550 } else { 2544 } else {
2551 struct channel_gk20a *ch = &g->fifo.channel[id]; 2545 struct channel_gk20a *ch = &g->fifo.channel[id];
2552 2546
2553 gk20a_err(dev_from_gk20a(g), 2547 nvgpu_err(g,
2554 "preempt channel %d timeout\n", id); 2548 "preempt channel %d timeout\n", id);
2555 2549
2556 if (gk20a_channel_get(ch)) { 2550 if (gk20a_channel_get(ch)) {
@@ -2733,7 +2727,7 @@ int gk20a_fifo_enable_all_engine_activity(struct gk20a *g)
2733 err = gk20a_fifo_enable_engine_activity(g, 2727 err = gk20a_fifo_enable_engine_activity(g,
2734 &g->fifo.engine_info[active_engine_id]); 2728 &g->fifo.engine_info[active_engine_id]);
2735 if (err) { 2729 if (err) {
2736 gk20a_err(dev_from_gk20a(g), 2730 nvgpu_err(g,
2737 "failed to enable engine %d activity\n", active_engine_id); 2731 "failed to enable engine %d activity\n", active_engine_id);
2738 ret = err; 2732 ret = err;
2739 } 2733 }
@@ -2806,7 +2800,7 @@ clean_up:
2806 if (err) { 2800 if (err) {
2807 gk20a_dbg_fn("failed"); 2801 gk20a_dbg_fn("failed");
2808 if (gk20a_fifo_enable_engine_activity(g, eng_info)) 2802 if (gk20a_fifo_enable_engine_activity(g, eng_info))
2809 gk20a_err(dev_from_gk20a(g), 2803 nvgpu_err(g,
2810 "failed to enable gr engine activity\n"); 2804 "failed to enable gr engine activity\n");
2811 } else { 2805 } else {
2812 gk20a_dbg_fn("done"); 2806 gk20a_dbg_fn("done");
@@ -3155,7 +3149,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
3155 ret = gk20a_fifo_runlist_wait_pending(g, runlist_id); 3149 ret = gk20a_fifo_runlist_wait_pending(g, runlist_id);
3156 3150
3157 if (ret == -ETIMEDOUT) { 3151 if (ret == -ETIMEDOUT) {
3158 gk20a_err(dev_from_gk20a(g), 3152 nvgpu_err(g,
3159 "runlist update timeout"); 3153 "runlist update timeout");
3160 3154
3161 gk20a_fifo_runlist_reset_engines(g, runlist_id); 3155 gk20a_fifo_runlist_reset_engines(g, runlist_id);
@@ -3167,10 +3161,10 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
3167 * should be fine */ 3161 * should be fine */
3168 3162
3169 if (ret) 3163 if (ret)
3170 gk20a_err(dev_from_gk20a(g), 3164 nvgpu_err(g,
3171 "runlist update failed: %d", ret); 3165 "runlist update failed: %d", ret);
3172 } else if (ret == -EINTR) 3166 } else if (ret == -EINTR)
3173 gk20a_err(dev_from_gk20a(g), 3167 nvgpu_err(g,
3174 "runlist update interrupted"); 3168 "runlist update interrupted");
3175 } 3169 }
3176 3170
@@ -3196,7 +3190,7 @@ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 hw_chid,
3196 /* Capture the last failure error code */ 3190 /* Capture the last failure error code */
3197 errcode = g->ops.fifo.update_runlist(g, runlist_id, hw_chid, add, wait_for_finish); 3191 errcode = g->ops.fifo.update_runlist(g, runlist_id, hw_chid, add, wait_for_finish);
3198 if (errcode) { 3192 if (errcode) {
3199 gk20a_err(dev_from_gk20a(g), 3193 nvgpu_err(g,
3200 "failed to update_runlist %d %d", runlist_id, errcode); 3194 "failed to update_runlist %d %d", runlist_id, errcode);
3201 ret = errcode; 3195 ret = errcode;
3202 } 3196 }
@@ -4051,8 +4045,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
4051 struct gk20a *g = ch->g; 4045 struct gk20a *g = ch->g;
4052 4046
4053 if (gk20a_is_channel_marked_as_tsg(ch)) { 4047 if (gk20a_is_channel_marked_as_tsg(ch)) {
4054 gk20a_err(dev_from_gk20a(ch->g), 4048 nvgpu_err(g, "invalid operation for TSG!\n");
4055 "invalid operation for TSG!\n");
4056 return -EINVAL; 4049 return -EINVAL;
4057 } 4050 }
4058 4051
@@ -4071,8 +4064,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
4071int gk20a_fifo_set_priority(struct channel_gk20a *ch, u32 priority) 4064int gk20a_fifo_set_priority(struct channel_gk20a *ch, u32 priority)
4072{ 4065{
4073 if (gk20a_is_channel_marked_as_tsg(ch)) { 4066 if (gk20a_is_channel_marked_as_tsg(ch)) {
4074 gk20a_err(dev_from_gk20a(ch->g), 4067 nvgpu_err(ch->g, "invalid operation for TSG!\n");
4075 "invalid operation for TSG!\n");
4076 return -EINVAL; 4068 return -EINVAL;
4077 } 4069 }
4078 4070
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index c8acf6f7..05e3c3f4 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -282,7 +282,7 @@ static int gk20a_init_support(struct platform_device *dev)
282 g->regs = gk20a_ioremap_resource(dev, GK20A_BAR0_IORESOURCE_MEM, 282 g->regs = gk20a_ioremap_resource(dev, GK20A_BAR0_IORESOURCE_MEM,
283 &g->reg_mem); 283 &g->reg_mem);
284 if (IS_ERR(g->regs)) { 284 if (IS_ERR(g->regs)) {
285 dev_err(dev_from_gk20a(g), "failed to remap gk20a registers\n"); 285 nvgpu_err(g, "failed to remap gk20a registers\n");
286 err = PTR_ERR(g->regs); 286 err = PTR_ERR(g->regs);
287 goto fail; 287 goto fail;
288 } 288 }
@@ -290,7 +290,7 @@ static int gk20a_init_support(struct platform_device *dev)
290 g->bar1 = gk20a_ioremap_resource(dev, GK20A_BAR1_IORESOURCE_MEM, 290 g->bar1 = gk20a_ioremap_resource(dev, GK20A_BAR1_IORESOURCE_MEM,
291 &g->bar1_mem); 291 &g->bar1_mem);
292 if (IS_ERR(g->bar1)) { 292 if (IS_ERR(g->bar1)) {
293 dev_err(dev_from_gk20a(g), "failed to remap gk20a bar1\n"); 293 nvgpu_err(g, "failed to remap gk20a bar1\n");
294 err = PTR_ERR(g->bar1); 294 err = PTR_ERR(g->bar1);
295 goto fail; 295 goto fail;
296 } 296 }
@@ -411,7 +411,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
411 if (platform->busy) { 411 if (platform->busy) {
412 err = platform->busy(dev); 412 err = platform->busy(dev);
413 if (err < 0) { 413 if (err < 0) {
414 dev_err(dev, "%s: failed to poweron platform dependency\n", 414 nvgpu_err(g, "%s: failed to poweron platform dependency\n",
415 __func__); 415 __func__);
416 goto done; 416 goto done;
417 } 417 }
@@ -467,7 +467,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
467 if (g->ops.clk.init_clk_support) { 467 if (g->ops.clk.init_clk_support) {
468 err = g->ops.clk.init_clk_support(g); 468 err = g->ops.clk.init_clk_support(g);
469 if (err) { 469 if (err) {
470 gk20a_err(dev, "failed to init gk20a clk"); 470 nvgpu_err(g, "failed to init gk20a clk");
471 goto done; 471 goto done;
472 } 472 }
473 } 473 }
@@ -475,7 +475,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
475 err = g->ops.fifo.reset_enable_hw(g); 475 err = g->ops.fifo.reset_enable_hw(g);
476 476
477 if (err) { 477 if (err) {
478 gk20a_err(dev, "failed to reset gk20a fifo"); 478 nvgpu_err(g, "failed to reset gk20a fifo");
479 goto done; 479 goto done;
480 } 480 }
481 481
@@ -484,13 +484,13 @@ int gk20a_pm_finalize_poweron(struct device *dev)
484 484
485 err = gk20a_init_mm_support(g); 485 err = gk20a_init_mm_support(g);
486 if (err) { 486 if (err) {
487 gk20a_err(dev, "failed to init gk20a mm"); 487 nvgpu_err(g, "failed to init gk20a mm");
488 goto done; 488 goto done;
489 } 489 }
490 490
491 err = gk20a_init_fifo_support(g); 491 err = gk20a_init_fifo_support(g);
492 if (err) { 492 if (err) {
493 gk20a_err(dev, "failed to init gk20a fifo"); 493 nvgpu_err(g, "failed to init gk20a fifo");
494 goto done; 494 goto done;
495 } 495 }
496 496
@@ -501,7 +501,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
501 501
502 err = gk20a_enable_gr_hw(g); 502 err = gk20a_enable_gr_hw(g);
503 if (err) { 503 if (err) {
504 gk20a_err(dev, "failed to enable gr"); 504 nvgpu_err(g, "failed to enable gr");
505 goto done; 505 goto done;
506 } 506 }
507 507
@@ -509,7 +509,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
509 if (g->ops.pmu.prepare_ucode) 509 if (g->ops.pmu.prepare_ucode)
510 err = g->ops.pmu.prepare_ucode(g); 510 err = g->ops.pmu.prepare_ucode(g);
511 if (err) { 511 if (err) {
512 gk20a_err(dev, "failed to init pmu ucode"); 512 nvgpu_err(g, "failed to init pmu ucode");
513 goto done; 513 goto done;
514 } 514 }
515 } 515 }
@@ -518,7 +518,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
518 if (g->ops.pmupstate) { 518 if (g->ops.pmupstate) {
519 err = gk20a_init_pstate_support(g); 519 err = gk20a_init_pstate_support(g);
520 if (err) { 520 if (err) {
521 gk20a_err(dev, "failed to init pstates"); 521 nvgpu_err(g, "failed to init pstates");
522 goto done; 522 goto done;
523 } 523 }
524 } 524 }
@@ -527,21 +527,21 @@ int gk20a_pm_finalize_poweron(struct device *dev)
527 if (g->ops.pmu.is_pmu_supported(g)) { 527 if (g->ops.pmu.is_pmu_supported(g)) {
528 err = gk20a_init_pmu_support(g); 528 err = gk20a_init_pmu_support(g);
529 if (err) { 529 if (err) {
530 gk20a_err(dev, "failed to init gk20a pmu"); 530 nvgpu_err(g, "failed to init gk20a pmu");
531 goto done; 531 goto done;
532 } 532 }
533 } 533 }
534 534
535 err = gk20a_init_gr_support(g); 535 err = gk20a_init_gr_support(g);
536 if (err) { 536 if (err) {
537 gk20a_err(dev, "failed to init gk20a gr"); 537 nvgpu_err(g, "failed to init gk20a gr");
538 goto done; 538 goto done;
539 } 539 }
540 540
541 if (g->ops.pmu.mclk_init) { 541 if (g->ops.pmu.mclk_init) {
542 err = g->ops.pmu.mclk_init(g); 542 err = g->ops.pmu.mclk_init(g);
543 if (err) { 543 if (err) {
544 gk20a_err(dev, "failed to set mclk"); 544 nvgpu_err(g, "failed to set mclk");
545 /* Indicate error dont goto done */ 545 /* Indicate error dont goto done */
546 } 546 }
547 } 547 }
@@ -550,37 +550,37 @@ int gk20a_pm_finalize_poweron(struct device *dev)
550 if (g->ops.pmupstate) { 550 if (g->ops.pmupstate) {
551 err = gk20a_init_pstate_pmu_support(g); 551 err = gk20a_init_pstate_pmu_support(g);
552 if (err) { 552 if (err) {
553 gk20a_err(dev, "failed to init pstates"); 553 nvgpu_err(g, "failed to init pstates");
554 goto done; 554 goto done;
555 } 555 }
556 } 556 }
557 557
558 err = nvgpu_clk_arb_init_arbiter(g); 558 err = nvgpu_clk_arb_init_arbiter(g);
559 if (err) { 559 if (err) {
560 gk20a_err(dev, "failed to init clk arb"); 560 nvgpu_err(g, "failed to init clk arb");
561 goto done; 561 goto done;
562 } 562 }
563#endif 563#endif
564 564
565 err = gk20a_init_therm_support(g); 565 err = gk20a_init_therm_support(g);
566 if (err) { 566 if (err) {
567 gk20a_err(dev, "failed to init gk20a therm"); 567 nvgpu_err(g, "failed to init gk20a therm");
568 goto done; 568 goto done;
569 } 569 }
570 570
571 err = g->ops.chip_init_gpu_characteristics(g); 571 err = g->ops.chip_init_gpu_characteristics(g);
572 if (err) { 572 if (err) {
573 gk20a_err(dev, "failed to init gk20a gpu characteristics"); 573 nvgpu_err(g, "failed to init gk20a gpu characteristics");
574 goto done; 574 goto done;
575 } 575 }
576 576
577 err = gk20a_ctxsw_trace_init(g); 577 err = gk20a_ctxsw_trace_init(g);
578 if (err) 578 if (err)
579 gk20a_warn(dev, "could not initialize ctxsw tracing"); 579 nvgpu_warn(g, "could not initialize ctxsw tracing");
580 580
581 err = gk20a_sched_ctrl_init(g); 581 err = gk20a_sched_ctrl_init(g);
582 if (err) { 582 if (err) {
583 gk20a_err(dev, "failed to init sched control"); 583 nvgpu_err(g, "failed to init sched control");
584 goto done; 584 goto done;
585 } 585 }
586 586
@@ -619,7 +619,7 @@ int gk20a_pm_finalize_poweron(struct device *dev)
619 speed = 1 << (fls(speed) - 1); 619 speed = 1 << (fls(speed) - 1);
620 err = g->ops.xve.set_speed(g, speed); 620 err = g->ops.xve.set_speed(g, speed);
621 if (err) { 621 if (err) {
622 gk20a_err(dev, "Failed to set PCIe bus speed!\n"); 622 nvgpu_err(g, "Failed to set PCIe bus speed!\n");
623 goto done; 623 goto done;
624 } 624 }
625 } 625 }
@@ -1312,7 +1312,7 @@ int __gk20a_do_idle(struct device *dev, bool force_reset)
1312 } while (ref_cnt != target_ref_cnt && !nvgpu_timeout_expired(&timeout)); 1312 } while (ref_cnt != target_ref_cnt && !nvgpu_timeout_expired(&timeout));
1313 1313
1314 if (ref_cnt != target_ref_cnt) { 1314 if (ref_cnt != target_ref_cnt) {
1315 gk20a_err(dev, "failed to idle - refcount %d != 1\n", 1315 nvgpu_err(g, "failed to idle - refcount %d != 1\n",
1316 ref_cnt); 1316 ref_cnt);
1317 goto fail_drop_usage_count; 1317 goto fail_drop_usage_count;
1318 } 1318 }
@@ -1344,7 +1344,7 @@ int __gk20a_do_idle(struct device *dev, bool force_reset)
1344 if (is_railgated) { 1344 if (is_railgated) {
1345 return 0; 1345 return 0;
1346 } else { 1346 } else {
1347 gk20a_err(dev, "failed to idle in timeout\n"); 1347 nvgpu_err(g, "failed to idle in timeout\n");
1348 goto fail_timeout; 1348 goto fail_timeout;
1349 } 1349 }
1350 } else { 1350 } else {
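The converted call sites in gk20a.c above (and in the files that follow) all share one shape: a struct gk20a pointer followed by a printf-style format string. As a rough illustration only -- <nvgpu/log.h> is not shown in this diff, so the definitions below are assumptions that merely reproduce the visible call signature, not the driver's actual macros -- a minimal stand-in could look like this:

#include <linux/printk.h>

/*
 * Minimal stand-in macros, for illustration only.  The real nvgpu_err()
 * and nvgpu_warn() come from <nvgpu/log.h>, which is not part of this
 * diff; these sketches only mirror the call shape used above and ignore
 * the gk20a pointer entirely, whereas the real helpers presumably use it.
 */
#define nvgpu_err_sketch(g, fmt, args...) \
	pr_err("nvgpu: %s:%d [ERR]  " fmt "\n", __func__, __LINE__, ##args)

#define nvgpu_warn_sketch(g, fmt, args...) \
	pr_warn("nvgpu: %s:%d [WARN] " fmt "\n", __func__, __LINE__, ##args)

/*
 * Example call, mirroring the converted sites above:
 *
 *	nvgpu_err_sketch(g, "failed to init gk20a fifo");
 */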
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_scale.c b/drivers/gpu/nvgpu/gk20a/gk20a_scale.c
index b411cb5c..06c73b90 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_scale.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_scale.c
@@ -31,6 +31,8 @@
31#include "clk_gk20a.h" 31#include "clk_gk20a.h"
32#include "gk20a_scale.h" 32#include "gk20a_scale.h"
33 33
34#include <nvgpu/log.h>
35
34/* 36/*
35 * gk20a_scale_qos_notify() 37 * gk20a_scale_qos_notify()
36 * 38 *
@@ -59,8 +61,8 @@ int gk20a_scale_qos_notify(struct notifier_block *nb,
59 pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS) * 1000; 61 pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS) * 1000;
60 62
61 if (profile->qos_min_freq > profile->qos_max_freq) { 63 if (profile->qos_min_freq > profile->qos_max_freq) {
62 gk20a_err(g->dev, 64 nvgpu_err(g,
63 "QoS: setting invalid limit, min_freq=%lu max_freq=%lu\n", 65 "QoS: setting invalid limit, min_freq=%lu max_freq=%lu",
64 profile->qos_min_freq, profile->qos_max_freq); 66 profile->qos_min_freq, profile->qos_max_freq);
65 profile->qos_min_freq = profile->qos_max_freq; 67 profile->qos_min_freq = profile->qos_max_freq;
66 } 68 }
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
index 96185ee7..712359e1 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
@@ -23,6 +23,7 @@
23 23
24#include <nvgpu/nvgpu_common.h> 24#include <nvgpu/nvgpu_common.h>
25#include <nvgpu/kmem.h> 25#include <nvgpu/kmem.h>
26#include <nvgpu/log.h>
26 27
27#include "gk20a.h" 28#include "gk20a.h"
28#include "gr_ctx_gk20a.h" 29#include "gr_ctx_gk20a.h"
@@ -111,7 +112,6 @@ static bool gr_gk20a_is_firmware_defined(void)
111 112
112static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) 113static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
113{ 114{
114 struct device *d = dev_from_gk20a(g);
115 const struct firmware *netlist_fw; 115 const struct firmware *netlist_fw;
116 struct netlist_image *netlist = NULL; 116 struct netlist_image *netlist = NULL;
117 char name[MAX_NETLIST_NAME]; 117 char name[MAX_NETLIST_NAME];
@@ -135,13 +135,13 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
135 135
136 for (; net < max; net++) { 136 for (; net < max; net++) {
137 if (g->ops.gr_ctx.get_netlist_name(g, net, name) != 0) { 137 if (g->ops.gr_ctx.get_netlist_name(g, net, name) != 0) {
138 gk20a_warn(d, "invalid netlist index %d", net); 138 nvgpu_warn(g, "invalid netlist index %d", net);
139 continue; 139 continue;
140 } 140 }
141 141
142 netlist_fw = nvgpu_request_firmware(g, name, 0); 142 netlist_fw = nvgpu_request_firmware(g, name, 0);
143 if (!netlist_fw) { 143 if (!netlist_fw) {
144 gk20a_warn(d, "failed to load netlist %s", name); 144 nvgpu_warn(g, "failed to load netlist %s", name);
145 continue; 145 continue;
146 } 146 }
147 147
@@ -436,7 +436,7 @@ done:
436 gk20a_dbg_info("netlist image %s loaded", name); 436 gk20a_dbg_info("netlist image %s loaded", name);
437 return 0; 437 return 0;
438 } else { 438 } else {
439 gk20a_err(d, "failed to load netlist image!!"); 439 nvgpu_err(g, "failed to load netlist image!!");
440 return err; 440 return err;
441 } 441 }
442} 442}
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
index 2fdbc01a..12ec9c5f 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
@@ -23,6 +23,8 @@
23#include "sim_gk20a.h" 23#include "sim_gk20a.h"
24#include "gr_ctx_gk20a.h" 24#include "gr_ctx_gk20a.h"
25 25
26#include <nvgpu/log.h>
27
26int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) 28int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
27{ 29{
28 int err = 0; 30 int err = 0;
@@ -239,8 +241,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
239 gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib"); 241 gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib");
240 return 0; 242 return 0;
241fail: 243fail:
242 gk20a_err(dev_from_gk20a(g), 244 nvgpu_err(g, "failed querying grctx info from chiplib");
243 "failed querying grctx info from chiplib");
244 return err; 245 return err;
245 246
246} 247}
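The gr_ctx hunks above and the gr_gk20a.c hunks below repeat one conversion pattern: the local struct device pointer fetched with dev_from_gk20a() is dropped, and errors are reported straight through the struct gk20a pointer. The function below is a hypothetical example -- its name, arguments, and body are invented for illustration and are not part of this change -- showing the post-conversion shape of such an error path:

/*
 * Hypothetical example only -- not code from this driver.  It mirrors
 * the firmware-loading error paths above after conversion: no struct
 * device local, errors reported via the gk20a pointer.
 */
static int example_load_netlist(struct gk20a *g, const char *name)
{
	const struct firmware *netlist_fw;

	netlist_fw = nvgpu_request_firmware(g, name, 0);
	if (!netlist_fw) {
		/* previously: gk20a_warn(dev_from_gk20a(g), ...) */
		nvgpu_warn(g, "failed to load netlist %s", name);
		return -ENOENT;
	}

	/* ... parse the netlist image here ... */

	release_firmware(netlist_fw);
	return 0;
}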
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index af02491e..06374fb7 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -33,6 +33,7 @@
33#include <nvgpu/kmem.h> 33#include <nvgpu/kmem.h>
34#include <nvgpu/timers.h> 34#include <nvgpu/timers.h>
35#include <nvgpu/nvgpu_common.h> 35#include <nvgpu/nvgpu_common.h>
36#include <nvgpu/log.h>
36 37
37#include "gk20a.h" 38#include "gk20a.h"
38#include "kind_gk20a.h" 39#include "kind_gk20a.h"
@@ -126,81 +127,81 @@ void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
126{ 127{
127 unsigned int i; 128 unsigned int i;
128 129
129 gk20a_err(dev_from_gk20a(g), "gr_fecs_os_r : %d", 130 nvgpu_err(g, "gr_fecs_os_r : %d",
130 gk20a_readl(g, gr_fecs_os_r())); 131 gk20a_readl(g, gr_fecs_os_r()));
131 gk20a_err(dev_from_gk20a(g), "gr_fecs_cpuctl_r : 0x%x", 132 nvgpu_err(g, "gr_fecs_cpuctl_r : 0x%x",
132 gk20a_readl(g, gr_fecs_cpuctl_r())); 133 gk20a_readl(g, gr_fecs_cpuctl_r()));
133 gk20a_err(dev_from_gk20a(g), "gr_fecs_idlestate_r : 0x%x", 134 nvgpu_err(g, "gr_fecs_idlestate_r : 0x%x",
134 gk20a_readl(g, gr_fecs_idlestate_r())); 135 gk20a_readl(g, gr_fecs_idlestate_r()));
135 gk20a_err(dev_from_gk20a(g), "gr_fecs_mailbox0_r : 0x%x", 136 nvgpu_err(g, "gr_fecs_mailbox0_r : 0x%x",
136 gk20a_readl(g, gr_fecs_mailbox0_r())); 137 gk20a_readl(g, gr_fecs_mailbox0_r()));
137 gk20a_err(dev_from_gk20a(g), "gr_fecs_mailbox1_r : 0x%x", 138 nvgpu_err(g, "gr_fecs_mailbox1_r : 0x%x",
138 gk20a_readl(g, gr_fecs_mailbox1_r())); 139 gk20a_readl(g, gr_fecs_mailbox1_r()));
139 gk20a_err(dev_from_gk20a(g), "gr_fecs_irqstat_r : 0x%x", 140 nvgpu_err(g, "gr_fecs_irqstat_r : 0x%x",
140 gk20a_readl(g, gr_fecs_irqstat_r())); 141 gk20a_readl(g, gr_fecs_irqstat_r()));
141 gk20a_err(dev_from_gk20a(g), "gr_fecs_irqmode_r : 0x%x", 142 nvgpu_err(g, "gr_fecs_irqmode_r : 0x%x",
142 gk20a_readl(g, gr_fecs_irqmode_r())); 143 gk20a_readl(g, gr_fecs_irqmode_r()));
143 gk20a_err(dev_from_gk20a(g), "gr_fecs_irqmask_r : 0x%x", 144 nvgpu_err(g, "gr_fecs_irqmask_r : 0x%x",
144 gk20a_readl(g, gr_fecs_irqmask_r())); 145 gk20a_readl(g, gr_fecs_irqmask_r()));
145 gk20a_err(dev_from_gk20a(g), "gr_fecs_irqdest_r : 0x%x", 146 nvgpu_err(g, "gr_fecs_irqdest_r : 0x%x",
146 gk20a_readl(g, gr_fecs_irqdest_r())); 147 gk20a_readl(g, gr_fecs_irqdest_r()));
147 gk20a_err(dev_from_gk20a(g), "gr_fecs_debug1_r : 0x%x", 148 nvgpu_err(g, "gr_fecs_debug1_r : 0x%x",
148 gk20a_readl(g, gr_fecs_debug1_r())); 149 gk20a_readl(g, gr_fecs_debug1_r()));
149 gk20a_err(dev_from_gk20a(g), "gr_fecs_debuginfo_r : 0x%x", 150 nvgpu_err(g, "gr_fecs_debuginfo_r : 0x%x",
150 gk20a_readl(g, gr_fecs_debuginfo_r())); 151 gk20a_readl(g, gr_fecs_debuginfo_r()));
151 152
152 for (i = 0; i < gr_fecs_ctxsw_mailbox__size_1_v(); i++) 153 for (i = 0; i < gr_fecs_ctxsw_mailbox__size_1_v(); i++)
153 gk20a_err(dev_from_gk20a(g), "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x", 154 nvgpu_err(g, "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x",
154 i, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(i))); 155 i, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(i)));
155 156
156 gk20a_err(dev_from_gk20a(g), "gr_fecs_engctl_r : 0x%x", 157 nvgpu_err(g, "gr_fecs_engctl_r : 0x%x",
157 gk20a_readl(g, gr_fecs_engctl_r())); 158 gk20a_readl(g, gr_fecs_engctl_r()));
158 gk20a_err(dev_from_gk20a(g), "gr_fecs_curctx_r : 0x%x", 159 nvgpu_err(g, "gr_fecs_curctx_r : 0x%x",
159 gk20a_readl(g, gr_fecs_curctx_r())); 160 gk20a_readl(g, gr_fecs_curctx_r()));
160 gk20a_err(dev_from_gk20a(g), "gr_fecs_nxtctx_r : 0x%x", 161 nvgpu_err(g, "gr_fecs_nxtctx_r : 0x%x",
161 gk20a_readl(g, gr_fecs_nxtctx_r())); 162 gk20a_readl(g, gr_fecs_nxtctx_r()));
162 163
163 gk20a_writel(g, gr_fecs_icd_cmd_r(), 164 gk20a_writel(g, gr_fecs_icd_cmd_r(),
164 gr_fecs_icd_cmd_opc_rreg_f() | 165 gr_fecs_icd_cmd_opc_rreg_f() |
165 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_IMB)); 166 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_IMB));
166 gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_IMB : 0x%x", 167 nvgpu_err(g, "FECS_FALCON_REG_IMB : 0x%x",
167 gk20a_readl(g, gr_fecs_icd_rdata_r())); 168 gk20a_readl(g, gr_fecs_icd_rdata_r()));
168 169
169 gk20a_writel(g, gr_fecs_icd_cmd_r(), 170 gk20a_writel(g, gr_fecs_icd_cmd_r(),
170 gr_fecs_icd_cmd_opc_rreg_f() | 171 gr_fecs_icd_cmd_opc_rreg_f() |
171 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_DMB)); 172 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_DMB));
172 gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_DMB : 0x%x", 173 nvgpu_err(g, "FECS_FALCON_REG_DMB : 0x%x",
173 gk20a_readl(g, gr_fecs_icd_rdata_r())); 174 gk20a_readl(g, gr_fecs_icd_rdata_r()));
174 175
175 gk20a_writel(g, gr_fecs_icd_cmd_r(), 176 gk20a_writel(g, gr_fecs_icd_cmd_r(),
176 gr_fecs_icd_cmd_opc_rreg_f() | 177 gr_fecs_icd_cmd_opc_rreg_f() |
177 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CSW)); 178 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CSW));
178 gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_CSW : 0x%x", 179 nvgpu_err(g, "FECS_FALCON_REG_CSW : 0x%x",
179 gk20a_readl(g, gr_fecs_icd_rdata_r())); 180 gk20a_readl(g, gr_fecs_icd_rdata_r()));
180 181
181 gk20a_writel(g, gr_fecs_icd_cmd_r(), 182 gk20a_writel(g, gr_fecs_icd_cmd_r(),
182 gr_fecs_icd_cmd_opc_rreg_f() | 183 gr_fecs_icd_cmd_opc_rreg_f() |
183 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CTX)); 184 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CTX));
184 gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_CTX : 0x%x", 185 nvgpu_err(g, "FECS_FALCON_REG_CTX : 0x%x",
185 gk20a_readl(g, gr_fecs_icd_rdata_r())); 186 gk20a_readl(g, gr_fecs_icd_rdata_r()));
186 187
187 gk20a_writel(g, gr_fecs_icd_cmd_r(), 188 gk20a_writel(g, gr_fecs_icd_cmd_r(),
188 gr_fecs_icd_cmd_opc_rreg_f() | 189 gr_fecs_icd_cmd_opc_rreg_f() |
189 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_EXCI)); 190 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_EXCI));
190 gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_EXCI : 0x%x", 191 nvgpu_err(g, "FECS_FALCON_REG_EXCI : 0x%x",
191 gk20a_readl(g, gr_fecs_icd_rdata_r())); 192 gk20a_readl(g, gr_fecs_icd_rdata_r()));
192 193
193 for (i = 0; i < 4; i++) { 194 for (i = 0; i < 4; i++) {
194 gk20a_writel(g, gr_fecs_icd_cmd_r(), 195 gk20a_writel(g, gr_fecs_icd_cmd_r(),
195 gr_fecs_icd_cmd_opc_rreg_f() | 196 gr_fecs_icd_cmd_opc_rreg_f() |
196 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_PC)); 197 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_PC));
197 gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_PC : 0x%x", 198 nvgpu_err(g, "FECS_FALCON_REG_PC : 0x%x",
198 gk20a_readl(g, gr_fecs_icd_rdata_r())); 199 gk20a_readl(g, gr_fecs_icd_rdata_r()));
199 200
200 gk20a_writel(g, gr_fecs_icd_cmd_r(), 201 gk20a_writel(g, gr_fecs_icd_cmd_r(),
201 gr_fecs_icd_cmd_opc_rreg_f() | 202 gr_fecs_icd_cmd_opc_rreg_f() |
202 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_SP)); 203 gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_SP));
203 gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_SP : 0x%x", 204 nvgpu_err(g, "FECS_FALCON_REG_SP : 0x%x",
204 gk20a_readl(g, gr_fecs_icd_rdata_r())); 205 gk20a_readl(g, gr_fecs_icd_rdata_r()));
205 } 206 }
206} 207}
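The falcon-state dump above issues the same two-step sequence for every debug register: write an RREG command to the ICD command register, then read the result back from the ICD read-data register. Purely as an illustration -- this helper is not part of the change, and the driver keeps the calls inline -- the sequence can be expressed as:

/*
 * Illustrative helper only -- not part of this change.  It factors the
 * repeated "issue ICD RREG command, then read back icd_rdata" sequence
 * from gk20a_fecs_dump_falcon_stats() above.
 */
static void fecs_dump_falcon_reg_sketch(struct gk20a *g, const char *name,
					u32 reg_idx)
{
	gk20a_writel(g, gr_fecs_icd_cmd_r(),
		     gr_fecs_icd_cmd_opc_rreg_f() |
		     gr_fecs_icd_cmd_idx_f(reg_idx));
	nvgpu_err(g, "%s : 0x%x", name,
		  gk20a_readl(g, gr_fecs_icd_rdata_r()));
}

/*
 * Usage, mirroring the dump above:
 *
 *	fecs_dump_falcon_reg_sketch(g, "FECS_FALCON_REG_IMB", PMU_FALCON_REG_IMB);
 *	fecs_dump_falcon_reg_sketch(g, "FECS_FALCON_REG_DMB", PMU_FALCON_REG_DMB);
 */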
@@ -373,7 +374,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms,
373 374
374 } while (!nvgpu_timeout_expired(&timeout)); 375 } while (!nvgpu_timeout_expired(&timeout));
375 376
376 gk20a_err(dev_from_gk20a(g), 377 nvgpu_err(g,
377 "timeout, ctxsw busy : %d, gr busy : %d", 378 "timeout, ctxsw busy : %d, gr busy : %d",
378 ctxsw_active, gr_busy); 379 ctxsw_active, gr_busy);
379 380
@@ -408,7 +409,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms,
408 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); 409 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
409 } while (!nvgpu_timeout_expired(&timeout)); 410 } while (!nvgpu_timeout_expired(&timeout));
410 411
411 gk20a_err(dev_from_gk20a(g), 412 nvgpu_err(g,
412 "timeout, fe busy : %x", val); 413 "timeout, fe busy : %x", val);
413 414
414 return -EAGAIN; 415 return -EAGAIN;
@@ -466,7 +467,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
466 /* do no success check */ 467 /* do no success check */
467 break; 468 break;
468 default: 469 default:
469 gk20a_err(dev_from_gk20a(g), 470 nvgpu_err(g,
470 "invalid success opcode 0x%x", opc_success); 471 "invalid success opcode 0x%x", opc_success);
471 472
472 check = WAIT_UCODE_ERROR; 473 check = WAIT_UCODE_ERROR;
@@ -498,7 +499,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
498 /* do no check on fail*/ 499 /* do no check on fail*/
499 break; 500 break;
500 default: 501 default:
501 gk20a_err(dev_from_gk20a(g), 502 nvgpu_err(g,
502 "invalid fail opcode 0x%x", opc_fail); 503 "invalid fail opcode 0x%x", opc_fail);
503 check = WAIT_UCODE_ERROR; 504 check = WAIT_UCODE_ERROR;
504 break; 505 break;
@@ -512,13 +513,13 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
512 } 513 }
513 514
514 if (check == WAIT_UCODE_TIMEOUT) { 515 if (check == WAIT_UCODE_TIMEOUT) {
515 gk20a_err(dev_from_gk20a(g), 516 nvgpu_err(g,
516 "timeout waiting on ucode response"); 517 "timeout waiting on ucode response");
517 gk20a_fecs_dump_falcon_stats(g); 518 gk20a_fecs_dump_falcon_stats(g);
518 gk20a_gr_debug_dump(g->dev); 519 gk20a_gr_debug_dump(g->dev);
519 return -1; 520 return -1;
520 } else if (check == WAIT_UCODE_ERROR) { 521 } else if (check == WAIT_UCODE_ERROR) {
521 gk20a_err(dev_from_gk20a(g), 522 nvgpu_err(g,
522 "ucode method failed on mailbox=%d value=0x%08x", 523 "ucode method failed on mailbox=%d value=0x%08x",
523 mailbox_id, reg); 524 mailbox_id, reg);
524 gk20a_fecs_dump_falcon_stats(g); 525 gk20a_fecs_dump_falcon_stats(g);
@@ -735,7 +736,7 @@ static int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g,
735 .cond.ok = GR_IS_UCODE_OP_AND, 736 .cond.ok = GR_IS_UCODE_OP_AND,
736 .cond.fail = GR_IS_UCODE_OP_AND}, true); 737 .cond.fail = GR_IS_UCODE_OP_AND}, true);
737 if (ret) 738 if (ret)
738 gk20a_err(dev_from_gk20a(g), 739 nvgpu_err(g,
739 "bind channel instance failed"); 740 "bind channel instance failed");
740 741
741 return ret; 742 return ret;
@@ -786,13 +787,13 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
786 787
787 ret = gk20a_disable_channel_tsg(g, c); 788 ret = gk20a_disable_channel_tsg(g, c);
788 if (ret) { 789 if (ret) {
789 gk20a_err(dev_from_gk20a(g), "failed to disable channel/TSG\n"); 790 nvgpu_err(g, "failed to disable channel/TSG\n");
790 goto clean_up; 791 goto clean_up;
791 } 792 }
792 ret = gk20a_fifo_preempt(g, c); 793 ret = gk20a_fifo_preempt(g, c);
793 if (ret) { 794 if (ret) {
794 gk20a_enable_channel_tsg(g, c); 795 gk20a_enable_channel_tsg(g, c);
795 gk20a_err(dev_from_gk20a(g), "failed to preempt channel/TSG\n"); 796 nvgpu_err(g, "failed to preempt channel/TSG\n");
796 goto clean_up; 797 goto clean_up;
797 } 798 }
798 799
@@ -1493,7 +1494,7 @@ static int gr_gk20a_fecs_ctx_image_save(struct channel_gk20a *c, u32 save_type)
1493 }, true); 1494 }, true);
1494 1495
1495 if (ret) 1496 if (ret)
1496 gk20a_err(dev_from_gk20a(g), "save context image failed"); 1497 nvgpu_err(g, "save context image failed");
1497 1498
1498 return ret; 1499 return ret;
1499} 1500}
@@ -1821,7 +1822,7 @@ restore_fe_go_idle:
1821 1822
1822clean_up: 1823clean_up:
1823 if (err) 1824 if (err)
1824 gk20a_err(dev_from_gk20a(g), "fail"); 1825 nvgpu_err(g, "fail");
1825 else 1826 else
1826 gk20a_dbg_fn("done"); 1827 gk20a_dbg_fn("done");
1827 1828
@@ -1844,7 +1845,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
1844 gk20a_dbg_fn(""); 1845 gk20a_dbg_fn("");
1845 1846
1846 if (!ch_ctx->gr_ctx) { 1847 if (!ch_ctx->gr_ctx) {
1847 gk20a_err(dev_from_gk20a(g), "no graphics context allocated"); 1848 nvgpu_err(g, "no graphics context allocated");
1848 return -EFAULT; 1849 return -EFAULT;
1849 } 1850 }
1850 1851
@@ -1852,13 +1853,13 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
1852 1853
1853 ret = gk20a_disable_channel_tsg(g, c); 1854 ret = gk20a_disable_channel_tsg(g, c);
1854 if (ret) { 1855 if (ret) {
1855 gk20a_err(dev_from_gk20a(g), "failed to disable channel/TSG\n"); 1856 nvgpu_err(g, "failed to disable channel/TSG\n");
1856 goto out; 1857 goto out;
1857 } 1858 }
1858 ret = gk20a_fifo_preempt(g, c); 1859 ret = gk20a_fifo_preempt(g, c);
1859 if (ret) { 1860 if (ret) {
1860 gk20a_enable_channel_tsg(g, c); 1861 gk20a_enable_channel_tsg(g, c);
1861 gk20a_err(dev_from_gk20a(g), "failed to preempt channel/TSG\n"); 1862 nvgpu_err(g, "failed to preempt channel/TSG\n");
1862 goto out; 1863 goto out;
1863 } 1864 }
1864 1865
@@ -1904,7 +1905,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
1904 gk20a_dbg_fn(""); 1905 gk20a_dbg_fn("");
1905 1906
1906 if (!ch_ctx->gr_ctx) { 1907 if (!ch_ctx->gr_ctx) {
1907 gk20a_err(dev_from_gk20a(g), "no graphics context allocated"); 1908 nvgpu_err(g, "no graphics context allocated");
1908 return -EFAULT; 1909 return -EFAULT;
1909 } 1910 }
1910 1911
@@ -1920,14 +1921,14 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
1920 1921
1921 ret = gk20a_disable_channel_tsg(g, c); 1922 ret = gk20a_disable_channel_tsg(g, c);
1922 if (ret) { 1923 if (ret) {
1923 gk20a_err(dev_from_gk20a(g), "failed to disable channel/TSG\n"); 1924 nvgpu_err(g, "failed to disable channel/TSG\n");
1924 return ret; 1925 return ret;
1925 } 1926 }
1926 1927
1927 ret = gk20a_fifo_preempt(g, c); 1928 ret = gk20a_fifo_preempt(g, c);
1928 if (ret) { 1929 if (ret) {
1929 gk20a_enable_channel_tsg(g, c); 1930 gk20a_enable_channel_tsg(g, c);
1930 gk20a_err(dev_from_gk20a(g), "failed to preempt channel/TSG\n"); 1931 nvgpu_err(g, "failed to preempt channel/TSG\n");
1931 return ret; 1932 return ret;
1932 } 1933 }
1933 1934
@@ -1944,7 +1945,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
1944 &pm_ctx->mem); 1945 &pm_ctx->mem);
1945 if (ret) { 1946 if (ret) {
1946 c->g->ops.fifo.enable_channel(c); 1947 c->g->ops.fifo.enable_channel(c);
1947 gk20a_err(dev_from_gk20a(g), 1948 nvgpu_err(g,
1948 "failed to allocate pm ctxt buffer"); 1949 "failed to allocate pm ctxt buffer");
1949 return ret; 1950 return ret;
1950 } 1951 }
@@ -1956,7 +1957,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
1956 gk20a_mem_flag_none, true, 1957 gk20a_mem_flag_none, true,
1957 pm_ctx->mem.aperture); 1958 pm_ctx->mem.aperture);
1958 if (!pm_ctx->mem.gpu_va) { 1959 if (!pm_ctx->mem.gpu_va) {
1959 gk20a_err(dev_from_gk20a(g), 1960 nvgpu_err(g,
1960 "failed to map pm ctxt buffer"); 1961 "failed to map pm ctxt buffer");
1961 nvgpu_dma_free(g, &pm_ctx->mem); 1962 nvgpu_dma_free(g, &pm_ctx->mem);
1962 c->g->ops.fifo.enable_channel(c); 1963 c->g->ops.fifo.enable_channel(c);
@@ -2152,7 +2153,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
2152 */ 2153 */
2153 if (ch_ctx->pm_ctx.pm_mode == ctxsw_prog_main_image_pm_mode_ctxsw_f()) { 2154 if (ch_ctx->pm_ctx.pm_mode == ctxsw_prog_main_image_pm_mode_ctxsw_f()) {
2154 if (ch_ctx->pm_ctx.mem.gpu_va == 0) { 2155 if (ch_ctx->pm_ctx.mem.gpu_va == 0) {
2155 gk20a_err(dev_from_gk20a(g), 2156 nvgpu_err(g,
2156 "context switched pm with no pm buffer!"); 2157 "context switched pm with no pm buffer!");
2157 nvgpu_mem_end(g, mem); 2158 nvgpu_mem_end(g, mem);
2158 return -EFAULT; 2159 return -EFAULT;
@@ -2201,7 +2202,6 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
2201{ 2202{
2202 struct mm_gk20a *mm = &g->mm; 2203 struct mm_gk20a *mm = &g->mm;
2203 struct vm_gk20a *vm = &mm->pmu.vm; 2204 struct vm_gk20a *vm = &mm->pmu.vm;
2204 struct device *d = dev_from_gk20a(g);
2205 struct gk20a_ctxsw_ucode_info *ucode_info = &g->ctxsw_ucode_info; 2205 struct gk20a_ctxsw_ucode_info *ucode_info = &g->ctxsw_ucode_info;
2206 int err; 2206 int err;
2207 2207
@@ -2220,7 +2220,7 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
2220 false, 2220 false,
2221 ucode_info->surface_desc.aperture); 2221 ucode_info->surface_desc.aperture);
2222 if (!ucode_info->surface_desc.gpu_va) { 2222 if (!ucode_info->surface_desc.gpu_va) {
2223 gk20a_err(d, "failed to update gmmu ptes\n"); 2223 nvgpu_err(g, "failed to update gmmu ptes\n");
2224 return -ENOMEM; 2224 return -ENOMEM;
2225 } 2225 }
2226 2226
@@ -2274,7 +2274,6 @@ static int gr_gk20a_copy_ctxsw_ucode_segments(
2274 2274
2275int gr_gk20a_init_ctxsw_ucode(struct gk20a *g) 2275int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
2276{ 2276{
2277 struct device *d = dev_from_gk20a(g);
2278 struct mm_gk20a *mm = &g->mm; 2277 struct mm_gk20a *mm = &g->mm;
2279 struct vm_gk20a *vm = &mm->pmu.vm; 2278 struct vm_gk20a *vm = &mm->pmu.vm;
2280 struct gk20a_ctxsw_bootloader_desc *fecs_boot_desc; 2279 struct gk20a_ctxsw_bootloader_desc *fecs_boot_desc;
@@ -2289,7 +2288,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
2289 2288
2290 fecs_fw = nvgpu_request_firmware(g, GK20A_FECS_UCODE_IMAGE, 0); 2289 fecs_fw = nvgpu_request_firmware(g, GK20A_FECS_UCODE_IMAGE, 0);
2291 if (!fecs_fw) { 2290 if (!fecs_fw) {
2292 gk20a_err(d, "failed to load fecs ucode!!"); 2291 nvgpu_err(g, "failed to load fecs ucode!!");
2293 return -ENOENT; 2292 return -ENOENT;
2294 } 2293 }
2295 2294
@@ -2300,7 +2299,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
2300 gpccs_fw = nvgpu_request_firmware(g, GK20A_GPCCS_UCODE_IMAGE, 0); 2299 gpccs_fw = nvgpu_request_firmware(g, GK20A_GPCCS_UCODE_IMAGE, 0);
2301 if (!gpccs_fw) { 2300 if (!gpccs_fw) {
2302 release_firmware(fecs_fw); 2301 release_firmware(fecs_fw);
2303 gk20a_err(d, "failed to load gpccs ucode!!"); 2302 nvgpu_err(g, "failed to load gpccs ucode!!");
2304 return -ENOENT; 2303 return -ENOENT;
2305 } 2304 }
2306 2305
@@ -2373,7 +2372,7 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
2373 retries--; 2372 retries--;
2374 } 2373 }
2375 if (!retries) { 2374 if (!retries) {
2376 gk20a_err(dev_from_gk20a(g), 2375 nvgpu_err(g,
2377 "arbiter idle timeout, status: %08x", 2376 "arbiter idle timeout, status: %08x",
2378 gk20a_readl(g, gr_fecs_ctxsw_status_1_r())); 2377 gk20a_readl(g, gr_fecs_ctxsw_status_1_r()));
2379 } 2378 }
@@ -2405,7 +2404,7 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
2405 val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r()); 2404 val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());
2406 } 2405 }
2407 if (!retries) 2406 if (!retries)
2408 gk20a_err(dev_from_gk20a(g), "arbiter complete timeout"); 2407 nvgpu_err(g, "arbiter complete timeout");
2409 2408
2410 gk20a_writel(g, gr_fecs_current_ctx_r(), 2409 gk20a_writel(g, gr_fecs_current_ctx_r(),
2411 gr_fecs_current_ctx_ptr_f(inst_ptr >> 12) | 2410 gr_fecs_current_ctx_ptr_f(inst_ptr >> 12) |
@@ -2422,7 +2421,7 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
2422 val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r()); 2421 val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());
2423 } 2422 }
2424 if (!retries) 2423 if (!retries)
2425 gk20a_err(dev_from_gk20a(g), "arbiter complete timeout"); 2424 nvgpu_err(g, "arbiter complete timeout");
2426} 2425}
2427 2426
2428void gr_gk20a_load_ctxsw_ucode_header(struct gk20a *g, u64 addr_base, 2427void gr_gk20a_load_ctxsw_ucode_header(struct gk20a *g, u64 addr_base,
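The instruction-block bind sequence above polls the FECS arbiter with a decrementing retry counter and reports a timeout through nvgpu_err() when the counter runs out. The sketch below restates that pattern in isolation; the retry bound, poll delay, and completion test are assumptions chosen only to make the example self-contained, not the driver's actual values:

/*
 * Sketch of the bounded poll used above -- not part of this change.
 * The bound, the delay, and the completion predicate are assumptions;
 * only the timeout reporting matches the converted call sites.
 * udelay() is from <linux/delay.h>.
 */
static void wait_fecs_arb_complete_sketch(struct gk20a *g)
{
	int retries = 100;		/* assumed bound */
	u32 val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());

	while (retries && val) {	/* assumed: 0 means "command done" */
		udelay(2);		/* assumed poll interval */
		retries--;
		val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());
	}

	if (!retries)
		nvgpu_err(g, "arbiter complete timeout");
}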
@@ -2499,7 +2498,7 @@ void gr_gk20a_load_ctxsw_ucode_header(struct gk20a *g, u64 addr_base,
2499 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); 2498 gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0);
2500 break; 2499 break;
2501 default: 2500 default:
2502 gk20a_err(dev_from_gk20a(g), 2501 nvgpu_err(g,
2503 "unknown falcon ucode boot signature 0x%08x" 2502 "unknown falcon ucode boot signature 0x%08x"
2504 " with reg_offset 0x%08x", 2503 " with reg_offset 0x%08x",
2505 segments->boot_signature, reg_offset); 2504 segments->boot_signature, reg_offset);
@@ -2631,7 +2630,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g)
2631 eUcodeHandshakeInitComplete, 2630 eUcodeHandshakeInitComplete,
2632 GR_IS_UCODE_OP_SKIP, 0, false); 2631 GR_IS_UCODE_OP_SKIP, 0, false);
2633 if (ret) { 2632 if (ret) {
2634 gk20a_err(dev_from_gk20a(g), "falcon ucode init timeout"); 2633 nvgpu_err(g, "falcon ucode init timeout");
2635 return ret; 2634 return ret;
2636 } 2635 }
2637 2636
@@ -2666,7 +2665,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g)
2666 op.mailbox.ret = &g->gr.ctx_vars.golden_image_size; 2665 op.mailbox.ret = &g->gr.ctx_vars.golden_image_size;
2667 ret = gr_gk20a_submit_fecs_method_op(g, op, false); 2666 ret = gr_gk20a_submit_fecs_method_op(g, op, false);
2668 if (ret) { 2667 if (ret) {
2669 gk20a_err(dev_from_gk20a(g), 2668 nvgpu_err(g,
2670 "query golden image size failed"); 2669 "query golden image size failed");
2671 return ret; 2670 return ret;
2672 } 2671 }
@@ -2675,7 +2674,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g)
2675 op.mailbox.ret = &g->gr.ctx_vars.zcull_ctxsw_image_size; 2674 op.mailbox.ret = &g->gr.ctx_vars.zcull_ctxsw_image_size;
2676 ret = gr_gk20a_submit_fecs_method_op(g, op, false); 2675 ret = gr_gk20a_submit_fecs_method_op(g, op, false);
2677 if (ret) { 2676 if (ret) {
2678 gk20a_err(dev_from_gk20a(g), 2677 nvgpu_err(g,
2679 "query zcull ctx image size failed"); 2678 "query zcull ctx image size failed");
2680 return ret; 2679 return ret;
2681 } 2680 }
@@ -2684,7 +2683,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g)
2684 op.mailbox.ret = &g->gr.ctx_vars.pm_ctxsw_image_size; 2683 op.mailbox.ret = &g->gr.ctx_vars.pm_ctxsw_image_size;
2685 ret = gr_gk20a_submit_fecs_method_op(g, op, false); 2684 ret = gr_gk20a_submit_fecs_method_op(g, op, false);
2686 if (ret) { 2685 if (ret) {
2687 gk20a_err(dev_from_gk20a(g), 2686 nvgpu_err(g,
2688 "query pm ctx image size failed"); 2687 "query pm ctx image size failed");
2689 return ret; 2688 return ret;
2690 } 2689 }
@@ -2815,7 +2814,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
2815 return 0; 2814 return 0;
2816 2815
2817 clean_up: 2816 clean_up:
2818 gk20a_err(dev_from_gk20a(g), "fail"); 2817 nvgpu_err(g, "fail");
2819 gr_gk20a_free_global_ctx_buffers(g); 2818 gr_gk20a_free_global_ctx_buffers(g);
2820 return -ENOMEM; 2819 return -ENOMEM;
2821} 2820}
@@ -2988,7 +2987,7 @@ static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g,
2988 int err; 2987 int err;
2989 2988
2990 if (!tsg->vm) { 2989 if (!tsg->vm) {
2991 gk20a_err(dev_from_gk20a(tsg->g), "No address space bound\n"); 2990 nvgpu_err(tsg->g, "No address space bound\n");
2992 return -ENOMEM; 2991 return -ENOMEM;
2993 } 2992 }
2994 2993
@@ -3029,7 +3028,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
3029void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg) 3028void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg)
3030{ 3029{
3031 if (!tsg->vm) { 3030 if (!tsg->vm) {
3032 gk20a_err(dev_from_gk20a(tsg->g), "No address space bound\n"); 3031 nvgpu_err(tsg->g, "No address space bound\n");
3033 return; 3032 return;
3034 } 3033 }
3035 tsg->g->ops.gr.free_gr_ctx(tsg->g, tsg->vm, tsg->tsg_gr_ctx); 3034 tsg->g->ops.gr.free_gr_ctx(tsg->g, tsg->vm, tsg->tsg_gr_ctx);
@@ -3139,14 +3138,14 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3139 3138
3140 /* an address space needs to have been bound at this point.*/ 3139 /* an address space needs to have been bound at this point.*/
3141 if (!gk20a_channel_as_bound(c) && !c->vm) { 3140 if (!gk20a_channel_as_bound(c) && !c->vm) {
3142 gk20a_err(dev_from_gk20a(g), 3141 nvgpu_err(g,
3143 "not bound to address space at time" 3142 "not bound to address space at time"
3144 " of grctx allocation"); 3143 " of grctx allocation");
3145 return -EINVAL; 3144 return -EINVAL;
3146 } 3145 }
3147 3146
3148 if (!g->ops.gr.is_valid_class(g, args->class_num)) { 3147 if (!g->ops.gr.is_valid_class(g, args->class_num)) {
3149 gk20a_err(dev_from_gk20a(g), 3148 nvgpu_err(g,
3150 "invalid obj class 0x%x", args->class_num); 3149 "invalid obj class 0x%x", args->class_num);
3151 err = -EINVAL; 3150 err = -EINVAL;
3152 goto out; 3151 goto out;
@@ -3163,7 +3162,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3163 args->class_num, 3162 args->class_num,
3164 args->flags); 3163 args->flags);
3165 if (err) { 3164 if (err) {
3166 gk20a_err(dev_from_gk20a(g), 3165 nvgpu_err(g,
3167 "fail to allocate gr ctx buffer"); 3166 "fail to allocate gr ctx buffer");
3168 goto out; 3167 goto out;
3169 } 3168 }
@@ -3171,7 +3170,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3171 /*TBD: needs to be more subtle about which is 3170 /*TBD: needs to be more subtle about which is
3172 * being allocated as some are allowed to be 3171 * being allocated as some are allowed to be
3173 * allocated along same channel */ 3172 * allocated along same channel */
3174 gk20a_err(dev_from_gk20a(g), 3173 nvgpu_err(g,
3175 "too many classes alloc'd on same channel"); 3174 "too many classes alloc'd on same channel");
3176 err = -EINVAL; 3175 err = -EINVAL;
3177 goto out; 3176 goto out;
@@ -3184,7 +3183,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3184 args->class_num, 3183 args->class_num,
3185 args->flags); 3184 args->flags);
3186 if (err) { 3185 if (err) {
3187 gk20a_err(dev_from_gk20a(g), 3186 nvgpu_err(g,
3188 "fail to allocate TSG gr ctx buffer"); 3187 "fail to allocate TSG gr ctx buffer");
3189 gk20a_vm_put(tsg->vm); 3188 gk20a_vm_put(tsg->vm);
3190 tsg->vm = NULL; 3189 tsg->vm = NULL;
@@ -3200,7 +3199,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3200 /* commit gr ctx buffer */ 3199 /* commit gr ctx buffer */
3201 err = g->ops.gr.commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va); 3200 err = g->ops.gr.commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
3202 if (err) { 3201 if (err) {
3203 gk20a_err(dev_from_gk20a(g), 3202 nvgpu_err(g,
3204 "fail to commit gr ctx buffer"); 3203 "fail to commit gr ctx buffer");
3205 goto out; 3204 goto out;
3206 } 3205 }
@@ -3209,7 +3208,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3209 if (ch_ctx->patch_ctx.mem.sgt == NULL) { 3208 if (ch_ctx->patch_ctx.mem.sgt == NULL) {
3210 err = gr_gk20a_alloc_channel_patch_ctx(g, c); 3209 err = gr_gk20a_alloc_channel_patch_ctx(g, c);
3211 if (err) { 3210 if (err) {
3212 gk20a_err(dev_from_gk20a(g), 3211 nvgpu_err(g,
3213 "fail to allocate patch buffer"); 3212 "fail to allocate patch buffer");
3214 goto out; 3213 goto out;
3215 } 3214 }
@@ -3219,7 +3218,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3219 if (!ch_ctx->global_ctx_buffer_mapped) { 3218 if (!ch_ctx->global_ctx_buffer_mapped) {
3220 err = gr_gk20a_map_global_ctx_buffers(g, c); 3219 err = gr_gk20a_map_global_ctx_buffers(g, c);
3221 if (err) { 3220 if (err) {
3222 gk20a_err(dev_from_gk20a(g), 3221 nvgpu_err(g,
3223 "fail to map global ctx buffer"); 3222 "fail to map global ctx buffer");
3224 goto out; 3223 goto out;
3225 } 3224 }
@@ -3237,7 +3236,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3237 if (support_gk20a_pmu(g->dev)) { 3236 if (support_gk20a_pmu(g->dev)) {
3238 err = gk20a_pmu_disable_elpg(g); 3237 err = gk20a_pmu_disable_elpg(g);
3239 if (err) { 3238 if (err) {
3240 gk20a_err(dev_from_gk20a(g), 3239 nvgpu_err(g,
3241 "failed to set disable elpg"); 3240 "failed to set disable elpg");
3242 } 3241 }
3243 } 3242 }
@@ -3278,7 +3277,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3278 lockboost, true); 3277 lockboost, true);
3279 gr_gk20a_ctx_patch_write_end(g, ch_ctx); 3278 gr_gk20a_ctx_patch_write_end(g, ch_ctx);
3280 } else { 3279 } else {
3281 gk20a_err(dev_from_gk20a(g), 3280 nvgpu_err(g,
3282 "failed to set texlock for compute class"); 3281 "failed to set texlock for compute class");
3283 } 3282 }
3284 3283
@@ -3291,7 +3290,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3291 /* init golden image, ELPG enabled after this is done */ 3290 /* init golden image, ELPG enabled after this is done */
3292 err = gr_gk20a_init_golden_ctx_image(g, c); 3291 err = gr_gk20a_init_golden_ctx_image(g, c);
3293 if (err) { 3292 if (err) {
3294 gk20a_err(dev_from_gk20a(g), 3293 nvgpu_err(g,
3295 "fail to init golden ctx image"); 3294 "fail to init golden ctx image");
3296 goto out; 3295 goto out;
3297 } 3296 }
@@ -3301,14 +3300,14 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
3301 err = gr_gk20a_elpg_protected_call(g, 3300 err = gr_gk20a_elpg_protected_call(g,
3302 gr_gk20a_load_golden_ctx_image(g, c)); 3301 gr_gk20a_load_golden_ctx_image(g, c));
3303 if (err) { 3302 if (err) {
3304 gk20a_err(dev_from_gk20a(g), 3303 nvgpu_err(g,
3305 "fail to load golden ctx image"); 3304 "fail to load golden ctx image");
3306 goto out; 3305 goto out;
3307 } 3306 }
3308 if (g->ops.fecs_trace.bind_channel && !c->vpr) { 3307 if (g->ops.fecs_trace.bind_channel && !c->vpr) {
3309 err = g->ops.fecs_trace.bind_channel(g, c); 3308 err = g->ops.fecs_trace.bind_channel(g, c);
3310 if (err) { 3309 if (err) {
3311 gk20a_warn(dev_from_gk20a(g), 3310 nvgpu_warn(g,
3312 "fail to bind channel for ctxsw trace"); 3311 "fail to bind channel for ctxsw trace");
3313 } 3312 }
3314 } 3313 }
@@ -3322,7 +3321,7 @@ out:
3322 can be reused so no need to release them. 3321 can be reused so no need to release them.
3323 2. golden image init and load is a one time thing so if 3322 2. golden image init and load is a one time thing so if
3324 they pass, no need to undo. */ 3323 they pass, no need to undo. */
3325 gk20a_err(dev_from_gk20a(g), "fail"); 3324 nvgpu_err(g, "fail");
3326 return err; 3325 return err;
3327} 3326}
3328 3327
@@ -3490,7 +3489,7 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
3490 gr->max_zcull_per_gpc_count = nvgpu_get_litter_value(g, GPU_LIT_NUM_ZCULL_BANKS); 3489 gr->max_zcull_per_gpc_count = nvgpu_get_litter_value(g, GPU_LIT_NUM_ZCULL_BANKS);
3491 3490
3492 if (!gr->gpc_count) { 3491 if (!gr->gpc_count) {
3493 gk20a_err(dev_from_gk20a(g), "gpc_count==0!"); 3492 nvgpu_err(g, "gpc_count==0!");
3494 goto clean_up; 3493 goto clean_up;
3495 } 3494 }
3496 3495
@@ -3846,7 +3845,7 @@ clean_up:
3846 nvgpu_kfree(g, sorted_to_unsorted_gpc_map); 3845 nvgpu_kfree(g, sorted_to_unsorted_gpc_map);
3847 3846
3848 if (ret) 3847 if (ret)
3849 gk20a_err(dev_from_gk20a(g), "fail"); 3848 nvgpu_err(g, "fail");
3850 else 3849 else
3851 gk20a_dbg_fn("done"); 3850 gk20a_dbg_fn("done");
3852 3851
@@ -3936,7 +3935,7 @@ static void gr_gk20a_detect_sm_arch(struct gk20a *g)
3936 if (raw_version == gr_gpc0_tpc0_sm_arch_spa_version_smkepler_lp_v()) 3935 if (raw_version == gr_gpc0_tpc0_sm_arch_spa_version_smkepler_lp_v())
3937 version = 0x320; /* SM 3.2 */ 3936 version = 0x320; /* SM 3.2 */
3938 else 3937 else
3939 gk20a_err(dev_from_gk20a(g), "Unknown SM version 0x%x\n", 3938 nvgpu_err(g, "Unknown SM version 0x%x\n",
3940 raw_version); 3939 raw_version);
3941 3940
3942 /* on Kepler, SM version == SPA version */ 3941 /* on Kepler, SM version == SPA version */
@@ -4030,7 +4029,7 @@ void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
4030 4029
4031 ret = gk20a_fifo_disable_engine_activity(g, gr_info, true); 4030 ret = gk20a_fifo_disable_engine_activity(g, gr_info, true);
4032 if (ret) { 4031 if (ret) {
4033 gk20a_err(dev_from_gk20a(g), 4032 nvgpu_err(g,
4034 "failed to disable gr engine activity"); 4033 "failed to disable gr engine activity");
4035 return; 4034 return;
4036 } 4035 }
@@ -4038,7 +4037,7 @@ void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
4038 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), 4037 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g),
4039 GR_IDLE_CHECK_DEFAULT); 4038 GR_IDLE_CHECK_DEFAULT);
4040 if (ret) { 4039 if (ret) {
4041 gk20a_err(dev_from_gk20a(g), 4040 nvgpu_err(g,
4042 "failed to idle graphics"); 4041 "failed to idle graphics");
4043 goto clean_up; 4042 goto clean_up;
4044 } 4043 }
@@ -4049,7 +4048,7 @@ void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
4049clean_up: 4048clean_up:
4050 ret = gk20a_fifo_enable_engine_activity(g, gr_info); 4049 ret = gk20a_fifo_enable_engine_activity(g, gr_info);
4051 if (ret) { 4050 if (ret) {
4052 gk20a_err(dev_from_gk20a(g), 4051 nvgpu_err(g,
4053 "failed to enable gr engine activity\n"); 4052 "failed to enable gr engine activity\n");
4054 } 4053 }
4055} 4054}
@@ -4080,7 +4079,7 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
4080 4079
4081 if (memcmp(c_tbl->color_l2, zbc_val->color_l2, 4080 if (memcmp(c_tbl->color_l2, zbc_val->color_l2,
4082 sizeof(zbc_val->color_l2))) { 4081 sizeof(zbc_val->color_l2))) {
4083 gk20a_err(dev_from_gk20a(g), 4082 nvgpu_err(g,
4084 "zbc l2 and ds color don't match with existing entries"); 4083 "zbc l2 and ds color don't match with existing entries");
4085 ret = -EINVAL; 4084 ret = -EINVAL;
4086 goto err_mutex; 4085 goto err_mutex;
@@ -4140,14 +4139,14 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
4140 if (g->ops.gr.add_zbc_type_s) { 4139 if (g->ops.gr.add_zbc_type_s) {
4141 added = g->ops.gr.add_zbc_type_s(g, gr, zbc_val, &ret); 4140 added = g->ops.gr.add_zbc_type_s(g, gr, zbc_val, &ret);
4142 } else { 4141 } else {
4143 gk20a_err(dev_from_gk20a(g), 4142 nvgpu_err(g,
4144 "invalid zbc table type %d", zbc_val->type); 4143 "invalid zbc table type %d", zbc_val->type);
4145 ret = -EINVAL; 4144 ret = -EINVAL;
4146 goto err_mutex; 4145 goto err_mutex;
4147 } 4146 }
4148 break; 4147 break;
4149 default: 4148 default:
4150 gk20a_err(dev_from_gk20a(g), 4149 nvgpu_err(g,
4151 "invalid zbc table type %d", zbc_val->type); 4150 "invalid zbc table type %d", zbc_val->type);
4152 ret = -EINVAL; 4151 ret = -EINVAL;
4153 goto err_mutex; 4152 goto err_mutex;
@@ -4179,7 +4178,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
4179 break; 4178 break;
4180 case GK20A_ZBC_TYPE_COLOR: 4179 case GK20A_ZBC_TYPE_COLOR:
4181 if (index >= GK20A_ZBC_TABLE_SIZE) { 4180 if (index >= GK20A_ZBC_TABLE_SIZE) {
4182 gk20a_err(dev_from_gk20a(g), 4181 nvgpu_err(g,
4183 "invalid zbc color table index\n"); 4182 "invalid zbc color table index\n");
4184 return -EINVAL; 4183 return -EINVAL;
4185 } 4184 }
@@ -4194,7 +4193,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
4194 break; 4193 break;
4195 case GK20A_ZBC_TYPE_DEPTH: 4194 case GK20A_ZBC_TYPE_DEPTH:
4196 if (index >= GK20A_ZBC_TABLE_SIZE) { 4195 if (index >= GK20A_ZBC_TABLE_SIZE) {
4197 gk20a_err(dev_from_gk20a(g), 4196 nvgpu_err(g,
4198 "invalid zbc depth table index\n"); 4197 "invalid zbc depth table index\n");
4199 return -EINVAL; 4198 return -EINVAL;
4200 } 4199 }
@@ -4207,13 +4206,13 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
4207 return g->ops.gr.zbc_s_query_table(g, gr, 4206 return g->ops.gr.zbc_s_query_table(g, gr,
4208 query_params); 4207 query_params);
4209 } else { 4208 } else {
4210 gk20a_err(dev_from_gk20a(g), 4209 nvgpu_err(g,
4211 "invalid zbc table type\n"); 4210 "invalid zbc table type\n");
4212 return -EINVAL; 4211 return -EINVAL;
4213 } 4212 }
4214 break; 4213 break;
4215 default: 4214 default:
4216 gk20a_err(dev_from_gk20a(g), 4215 nvgpu_err(g,
4217 "invalid zbc table type\n"); 4216 "invalid zbc table type\n");
4218 return -EINVAL; 4217 return -EINVAL;
4219 } 4218 }
@@ -4303,7 +4302,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
4303 if (!err) 4302 if (!err)
4304 gr->max_default_color_index = 3; 4303 gr->max_default_color_index = 3;
4305 else { 4304 else {
4306 gk20a_err(dev_from_gk20a(g), 4305 nvgpu_err(g,
4307 "fail to load default zbc color table\n"); 4306 "fail to load default zbc color table\n");
4308 return err; 4307 return err;
4309 } 4308 }
@@ -4322,7 +4321,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
4322 if (!err) 4321 if (!err)
4323 gr->max_default_depth_index = 2; 4322 gr->max_default_depth_index = 2;
4324 else { 4323 else {
4325 gk20a_err(dev_from_gk20a(g), 4324 nvgpu_err(g,
4326 "fail to load default zbc depth table\n"); 4325 "fail to load default zbc depth table\n");
4327 return err; 4326 return err;
4328 } 4327 }
@@ -4349,7 +4348,7 @@ int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr,
4349 4348
4350 ret = gk20a_fifo_disable_engine_activity(g, gr_info, true); 4349 ret = gk20a_fifo_disable_engine_activity(g, gr_info, true);
4351 if (ret) { 4350 if (ret) {
4352 gk20a_err(dev_from_gk20a(g), 4351 nvgpu_err(g,
4353 "failed to disable gr engine activity"); 4352 "failed to disable gr engine activity");
4354 return ret; 4353 return ret;
4355 } 4354 }
@@ -4357,7 +4356,7 @@ int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr,
4357 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), 4356 ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g),
4358 GR_IDLE_CHECK_DEFAULT); 4357 GR_IDLE_CHECK_DEFAULT);
4359 if (ret) { 4358 if (ret) {
4360 gk20a_err(dev_from_gk20a(g), 4359 nvgpu_err(g,
4361 "failed to idle graphics"); 4360 "failed to idle graphics");
4362 goto clean_up; 4361 goto clean_up;
4363 } 4362 }
@@ -4366,7 +4365,7 @@ int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr,
4366 4365
4367clean_up: 4366clean_up:
4368 if (gk20a_fifo_enable_engine_activity(g, gr_info)) { 4367 if (gk20a_fifo_enable_engine_activity(g, gr_info)) {
4369 gk20a_err(dev_from_gk20a(g), 4368 nvgpu_err(g,
4370 "failed to enable gr engine activity"); 4369 "failed to enable gr engine activity");
4371 } 4370 }
4372 4371
@@ -4400,7 +4399,7 @@ void gr_gk20a_init_blcg_mode(struct gk20a *g, u32 mode, u32 engine)
4400 therm_gate_ctrl_blk_clk_auto_f()); 4399 therm_gate_ctrl_blk_clk_auto_f());
4401 break; 4400 break;
4402 default: 4401 default:
4403 gk20a_err(dev_from_gk20a(g), 4402 nvgpu_err(g,
4404 "invalid blcg mode %d", mode); 4403 "invalid blcg mode %d", mode);
4405 return; 4404 return;
4406 } 4405 }
@@ -4435,7 +4434,7 @@ void gr_gk20a_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
4435 therm_gate_ctrl_eng_clk_auto_f()); 4434 therm_gate_ctrl_eng_clk_auto_f());
4436 break; 4435 break;
4437 default: 4436 default:
4438 gk20a_err(dev_from_gk20a(g), 4437 nvgpu_err(g,
4439 "invalid elcg mode %d", mode); 4438 "invalid elcg mode %d", mode);
4440 } 4439 }
4441 4440
@@ -4462,7 +4461,7 @@ void gr_gk20a_init_cg_mode(struct gk20a *g, u32 cgmode, u32 mode_config)
4462 g->ops.gr.init_elcg_mode(g, mode_config, 4461 g->ops.gr.init_elcg_mode(g, mode_config,
4463 active_engine_id); 4462 active_engine_id);
4464 else 4463 else
4465 gk20a_err(dev_from_gk20a(g), "invalid cg mode %d %d", cgmode, mode_config); 4464 nvgpu_err(g, "invalid cg mode %d %d", cgmode, mode_config);
4466 } 4465 }
4467} 4466}
4468 4467
@@ -4592,7 +4591,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
4592 zcull_map_tiles = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32)); 4591 zcull_map_tiles = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32));
4593 4592
4594 if (!zcull_map_tiles) { 4593 if (!zcull_map_tiles) {
4595 gk20a_err(dev_from_gk20a(g), 4594 nvgpu_err(g,
4596 "failed to allocate zcull map titles"); 4595 "failed to allocate zcull map titles");
4597 return -ENOMEM; 4596 return -ENOMEM;
4598 } 4597 }
@@ -4600,7 +4599,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
4600 zcull_bank_counters = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32)); 4599 zcull_bank_counters = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32));
4601 4600
4602 if (!zcull_bank_counters) { 4601 if (!zcull_bank_counters) {
4603 gk20a_err(dev_from_gk20a(g), 4602 nvgpu_err(g,
4604 "failed to allocate zcull bank counters"); 4603 "failed to allocate zcull bank counters");
4605 nvgpu_kfree(g, zcull_map_tiles); 4604 nvgpu_kfree(g, zcull_map_tiles);
4606 return -ENOMEM; 4605 return -ENOMEM;
@@ -4626,7 +4625,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
4626 4625
4627 if (gpc_zcull_count != gr->max_zcull_per_gpc_count && 4626 if (gpc_zcull_count != gr->max_zcull_per_gpc_count &&
4628 gpc_zcull_count < gpc_tpc_count) { 4627 gpc_zcull_count < gpc_tpc_count) {
4629 gk20a_err(dev_from_gk20a(g), 4628 nvgpu_err(g,
4630 "zcull_banks (%d) less than tpcs (%d) for gpc (%d)", 4629 "zcull_banks (%d) less than tpcs (%d) for gpc (%d)",
4631 gpc_zcull_count, gpc_tpc_count, gpc_index); 4630 gpc_zcull_count, gpc_tpc_count, gpc_index);
4632 return -EINVAL; 4631 return -EINVAL;
@@ -4991,7 +4990,7 @@ static int gk20a_init_gr_prepare(struct gk20a *g)
4991 if (!g->gr.ctx_vars.valid) { 4990 if (!g->gr.ctx_vars.valid) {
4992 err = gr_gk20a_init_ctx_vars(g, &g->gr); 4991 err = gr_gk20a_init_ctx_vars(g, &g->gr);
4993 if (err) 4992 if (err)
4994 gk20a_err(dev_from_gk20a(g), 4993 nvgpu_err(g,
4995 "fail to load gr init ctx"); 4994 "fail to load gr init ctx");
4996 } 4995 }
4997 return err; 4996 return err;
@@ -5024,7 +5023,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
5024 udelay(CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT); 5023 udelay(CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT);
5025 } while (!nvgpu_timeout_expired(&timeout)); 5024 } while (!nvgpu_timeout_expired(&timeout));
5026 5025
5027 gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout"); 5026 nvgpu_err(g, "Falcon mem scrubbing timeout");
5028 return -ETIMEDOUT; 5027 return -ETIMEDOUT;
5029} 5028}
5030 5029
@@ -5042,7 +5041,7 @@ static int gr_gk20a_init_ctxsw(struct gk20a *g)
5042 5041
5043out: 5042out:
5044 if (err) 5043 if (err)
5045 gk20a_err(dev_from_gk20a(g), "fail"); 5044 nvgpu_err(g, "fail");
5046 else 5045 else
5047 gk20a_dbg_fn("done"); 5046 gk20a_dbg_fn("done");
5048 5047
@@ -5076,7 +5075,7 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
5076 5075
5077out: 5076out:
5078 if (err) 5077 if (err)
5079 gk20a_err(dev_from_gk20a(g), "fail"); 5078 nvgpu_err(g, "fail");
5080 else 5079 else
5081 gk20a_dbg_fn("done"); 5080 gk20a_dbg_fn("done");
5082 5081
@@ -5094,7 +5093,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
5094 unsigned int num_entries = 0; 5093 unsigned int num_entries = 0;
5095 5094
5096 if (nvgpu_mem_begin(g, mem)) { 5095 if (nvgpu_mem_begin(g, mem)) {
5097 gk20a_err(dev_from_gk20a(g), 5096 nvgpu_err(g,
5098 "failed to map priv access map memory"); 5097 "failed to map priv access map memory");
5099 return -ENOMEM; 5098 return -ENOMEM;
5100 } 5099 }
@@ -5188,7 +5187,7 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
5188 return 0; 5187 return 0;
5189 5188
5190clean_up: 5189clean_up:
5191 gk20a_err(dev_from_gk20a(g), "fail"); 5190 nvgpu_err(g, "fail");
5192 gk20a_remove_gr_support(gr); 5191 gk20a_remove_gr_support(gr);
5193 return err; 5192 return err;
5194} 5193}
@@ -5198,7 +5197,6 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
5198 struct pmu_gk20a *pmu = &g->pmu; 5197 struct pmu_gk20a *pmu = &g->pmu;
5199 struct mm_gk20a *mm = &g->mm; 5198 struct mm_gk20a *mm = &g->mm;
5200 struct vm_gk20a *vm = &mm->pmu.vm; 5199 struct vm_gk20a *vm = &mm->pmu.vm;
5201 struct device *d = dev_from_gk20a(g);
5202 int err = 0; 5200 int err = 0;
5203 5201
5204 u32 size; 5202 u32 size;
@@ -5209,7 +5207,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
5209 5207
5210 err = gr_gk20a_fecs_get_reglist_img_size(g, &size); 5208 err = gr_gk20a_fecs_get_reglist_img_size(g, &size);
5211 if (err) { 5209 if (err) {
5212 gk20a_err(dev_from_gk20a(g), 5210 nvgpu_err(g,
5213 "fail to query fecs pg buffer size"); 5211 "fail to query fecs pg buffer size");
5214 return err; 5212 return err;
5215 } 5213 }
@@ -5217,7 +5215,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
5217 if (!pmu->pg_buf.cpu_va) { 5215 if (!pmu->pg_buf.cpu_va) {
5218 err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf); 5216 err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf);
5219 if (err) { 5217 if (err) {
5220 gk20a_err(d, "failed to allocate memory\n"); 5218 nvgpu_err(g, "failed to allocate memory\n");
5221 return -ENOMEM; 5219 return -ENOMEM;
5222 } 5220 }
5223 } 5221 }
@@ -5225,14 +5223,14 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
5225 5223
5226 err = gr_gk20a_fecs_set_reglist_bind_inst(g, &mm->pmu.inst_block); 5224 err = gr_gk20a_fecs_set_reglist_bind_inst(g, &mm->pmu.inst_block);
5227 if (err) { 5225 if (err) {
5228 gk20a_err(dev_from_gk20a(g), 5226 nvgpu_err(g,
5229 "fail to bind pmu inst to gr"); 5227 "fail to bind pmu inst to gr");
5230 return err; 5228 return err;
5231 } 5229 }
5232 5230
5233 err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.gpu_va); 5231 err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.gpu_va);
5234 if (err) { 5232 if (err) {
5235 gk20a_err(dev_from_gk20a(g), 5233 nvgpu_err(g,
5236 "fail to set pg buffer pmu va"); 5234 "fail to set pg buffer pmu va");
5237 return err; 5235 return err;
5238 } 5236 }
@@ -5496,21 +5494,21 @@ int gk20a_gr_reset(struct gk20a *g)
5496 size = 0; 5494 size = 0;
5497 err = gr_gk20a_fecs_get_reglist_img_size(g, &size); 5495 err = gr_gk20a_fecs_get_reglist_img_size(g, &size);
5498 if (err) { 5496 if (err) {
5499 gk20a_err(dev_from_gk20a(g), 5497 nvgpu_err(g,
5500 "fail to query fecs pg buffer size"); 5498 "fail to query fecs pg buffer size");
5501 return err; 5499 return err;
5502 } 5500 }
5503 5501
5504 err = gr_gk20a_fecs_set_reglist_bind_inst(g, &g->mm.pmu.inst_block); 5502 err = gr_gk20a_fecs_set_reglist_bind_inst(g, &g->mm.pmu.inst_block);
5505 if (err) { 5503 if (err) {
5506 gk20a_err(dev_from_gk20a(g), 5504 nvgpu_err(g,
5507 "fail to bind pmu inst to gr"); 5505 "fail to bind pmu inst to gr");
5508 return err; 5506 return err;
5509 } 5507 }
5510 5508
5511 err = gr_gk20a_fecs_set_reglist_virtual_addr(g, g->pmu.pg_buf.gpu_va); 5509 err = gr_gk20a_fecs_set_reglist_virtual_addr(g, g->pmu.pg_buf.gpu_va);
5512 if (err) { 5510 if (err) {
5513 gk20a_err(dev_from_gk20a(g), 5511 nvgpu_err(g,
5514 "fail to set pg buffer pmu va"); 5512 "fail to set pg buffer pmu va");
5515 return err; 5513 return err;
5516 } 5514 }
@@ -5593,7 +5591,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
5593 gk20a_dbg_fn(""); 5591 gk20a_dbg_fn("");
5594 gk20a_gr_set_error_notifier(g, isr_data, 5592 gk20a_gr_set_error_notifier(g, isr_data,
5595 NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT); 5593 NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT);
5596 gk20a_err(dev_from_gk20a(g), 5594 nvgpu_err(g,
5597 "gr semaphore timeout\n"); 5595 "gr semaphore timeout\n");
5598 return -EINVAL; 5596 return -EINVAL;
5599} 5597}
@@ -5605,7 +5603,7 @@ static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g,
5605 gk20a_gr_set_error_notifier(g, isr_data, 5603 gk20a_gr_set_error_notifier(g, isr_data,
5606 NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY); 5604 NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY);
5607 /* This is an unrecoverable error, reset is needed */ 5605 /* This is an unrecoverable error, reset is needed */
5608 gk20a_err(dev_from_gk20a(g), 5606 nvgpu_err(g,
5609 "gr semaphore timeout\n"); 5607 "gr semaphore timeout\n");
5610 return -EINVAL; 5608 return -EINVAL;
5611} 5609}
@@ -5619,7 +5617,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g,
5619 if (ret) { 5617 if (ret) {
5620 gk20a_gr_set_error_notifier(g, isr_data, 5618 gk20a_gr_set_error_notifier(g, isr_data,
5621 NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY); 5619 NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY);
5622 gk20a_err(dev_from_gk20a(g), "invalid method class 0x%08x" 5620 nvgpu_err(g, "invalid method class 0x%08x"
5623 ", offset 0x%08x address 0x%08x\n", 5621 ", offset 0x%08x address 0x%08x\n",
5624 isr_data->class_num, isr_data->offset, isr_data->addr); 5622 isr_data->class_num, isr_data->offset, isr_data->addr);
5625 } 5623 }
@@ -5632,7 +5630,7 @@ static int gk20a_gr_handle_illegal_class(struct gk20a *g,
5632 gk20a_dbg_fn(""); 5630 gk20a_dbg_fn("");
5633 gk20a_gr_set_error_notifier(g, isr_data, 5631 gk20a_gr_set_error_notifier(g, isr_data,
5634 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY); 5632 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
5635 gk20a_err(dev_from_gk20a(g), 5633 nvgpu_err(g,
5636 "invalid class 0x%08x, offset 0x%08x", 5634 "invalid class 0x%08x, offset 0x%08x",
5637 isr_data->class_num, isr_data->offset); 5635 isr_data->class_num, isr_data->offset);
5638 return -EINVAL; 5636 return -EINVAL;
@@ -5649,14 +5647,14 @@ int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch,
5649 if (!gr_fecs_intr) 5647 if (!gr_fecs_intr)
5650 return 0; 5648 return 0;
5651 5649
5652 gk20a_err(dev_from_gk20a(g), 5650 nvgpu_err(g,
5653 "unhandled fecs error interrupt 0x%08x for channel %u", 5651 "unhandled fecs error interrupt 0x%08x for channel %u",
5654 gr_fecs_intr, isr_data->chid); 5652 gr_fecs_intr, isr_data->chid);
5655 5653
5656 if (gr_fecs_intr & gr_fecs_host_int_status_umimp_firmware_method_f(1)) { 5654 if (gr_fecs_intr & gr_fecs_host_int_status_umimp_firmware_method_f(1)) {
5657 gk20a_gr_set_error_notifier(g, isr_data, 5655 gk20a_gr_set_error_notifier(g, isr_data,
5658 NVGPU_CHANNEL_FECS_ERR_UNIMP_FIRMWARE_METHOD); 5656 NVGPU_CHANNEL_FECS_ERR_UNIMP_FIRMWARE_METHOD);
5659 gk20a_err(dev_from_gk20a(g), 5657 nvgpu_err(g,
5660 "firmware method error 0x%08x for offset 0x%04x", 5658 "firmware method error 0x%08x for offset 0x%04x",
5661 gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(6)), 5659 gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(6)),
5662 isr_data->data_lo); 5660 isr_data->data_lo);
@@ -5678,7 +5676,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
5678 gr_class_error_code_v(gk20a_readl(g, gr_class_error_r())); 5676 gr_class_error_code_v(gk20a_readl(g, gr_class_error_r()));
5679 gk20a_gr_set_error_notifier(g, isr_data, 5677 gk20a_gr_set_error_notifier(g, isr_data,
5680 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY); 5678 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
5681 gk20a_err(dev_from_gk20a(g), 5679 nvgpu_err(g,
5682 "class error 0x%08x, offset 0x%08x," 5680 "class error 0x%08x, offset 0x%08x,"
5683 " unhandled intr 0x%08x for channel %u\n", 5681 " unhandled intr 0x%08x for channel %u\n",
5684 isr_data->class_num, isr_data->offset, 5682 isr_data->class_num, isr_data->offset,
@@ -5694,7 +5692,7 @@ static int gk20a_gr_handle_firmware_method(struct gk20a *g,
5694 5692
5695 gk20a_gr_set_error_notifier(g, isr_data, 5693 gk20a_gr_set_error_notifier(g, isr_data,
5696 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY); 5694 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
5697 gk20a_err(dev_from_gk20a(g), 5695 nvgpu_err(g,
5698 "firmware method 0x%08x, offset 0x%08x for channel %u\n", 5696 "firmware method 0x%08x, offset 0x%08x for channel %u\n",
5699 isr_data->class_num, isr_data->offset, 5697 isr_data->class_num, isr_data->offset,
5700 isr_data->chid); 5698 isr_data->chid);
@@ -5772,7 +5770,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
5772 /* validate offset */ 5770 /* validate offset */
5773 if (offset + sizeof(struct share_buffer_head) > buffer_size || 5771 if (offset + sizeof(struct share_buffer_head) > buffer_size ||
5774 offset + sizeof(struct share_buffer_head) < offset) { 5772 offset + sizeof(struct share_buffer_head) < offset) {
5775 gk20a_err(dev_from_gk20a(g), 5773 nvgpu_err(g,
5776 "cyclestats buffer overrun at offset 0x%x\n", 5774 "cyclestats buffer overrun at offset 0x%x\n",
5777 offset); 5775 offset);
5778 break; 5776 break;
@@ -5790,7 +5788,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
5790 if (sh_hdr->size < min_element_size || 5788 if (sh_hdr->size < min_element_size ||
5791 offset + sh_hdr->size > buffer_size || 5789 offset + sh_hdr->size > buffer_size ||
5792 offset + sh_hdr->size < offset) { 5790 offset + sh_hdr->size < offset) {
5793 gk20a_err(dev_from_gk20a(g), 5791 nvgpu_err(g,
5794 "bad cyclestate buffer header size at offset 0x%x\n", 5792 "bad cyclestate buffer header size at offset 0x%x\n",
5795 offset); 5793 offset);
5796 sh_hdr->failed = true; 5794 sh_hdr->failed = true;
@@ -5814,7 +5812,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
5814 u64 v; 5812 u64 v;
5815 5813
5816 if (!valid) { 5814 if (!valid) {
5817 gk20a_err(dev_from_gk20a(g), 5815 nvgpu_err(g,
5818 "invalid cycletstats op offset: 0x%x\n", 5816 "invalid cycletstats op offset: 0x%x\n",
5819 op_elem->offset_bar0); 5817 op_elem->offset_bar0);
5820 5818
@@ -6070,7 +6068,7 @@ static int gk20a_gr_update_sm_error_state(struct gk20a *g,
6070 6068
6071 err = gr_gk20a_disable_ctxsw(g); 6069 err = gr_gk20a_disable_ctxsw(g);
6072 if (err) { 6070 if (err) {
6073 gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw\n"); 6071 nvgpu_err(g, "unable to stop gr ctxsw\n");
6074 goto fail; 6072 goto fail;
6075 } 6073 }
6076 6074
@@ -6130,7 +6128,7 @@ static int gk20a_gr_clear_sm_error_state(struct gk20a *g,
6130 6128
6131 err = gr_gk20a_disable_ctxsw(g); 6129 err = gr_gk20a_disable_ctxsw(g);
6132 if (err) { 6130 if (err) {
6133 gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw\n"); 6131 nvgpu_err(g, "unable to stop gr ctxsw\n");
6134 goto fail; 6132 goto fail;
6135 } 6133 }
6136 6134
@@ -6183,7 +6181,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
6183 warp_esr = g->ops.gr.mask_hww_warp_esr(warp_esr); 6181 warp_esr = g->ops.gr.mask_hww_warp_esr(warp_esr);
6184 6182
6185 if (!sm_debugger_attached) { 6183 if (!sm_debugger_attached) {
6186 gk20a_err(dev_from_gk20a(g), "sm hww global %08x warp %08x\n", 6184 nvgpu_err(g, "sm hww global %08x warp %08x\n",
6187 global_esr, warp_esr); 6185 global_esr, warp_esr);
6188 return -EFAULT; 6186 return -EFAULT;
6189 } 6187 }
@@ -6203,7 +6201,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
6203 &early_exit, 6201 &early_exit,
6204 &ignore_debugger); 6202 &ignore_debugger);
6205 if (ret) { 6203 if (ret) {
6206 gk20a_err(dev_from_gk20a(g), "could not pre-process sm error!\n"); 6204 nvgpu_err(g, "could not pre-process sm error!\n");
6207 return ret; 6205 return ret;
6208 } 6206 }
6209 } 6207 }
@@ -6237,7 +6235,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
6237 if (do_warp_sync) { 6235 if (do_warp_sync) {
6238 ret = gk20a_gr_lock_down_sm(g, gpc, tpc, global_mask, true); 6236 ret = gk20a_gr_lock_down_sm(g, gpc, tpc, global_mask, true);
6239 if (ret) { 6237 if (ret) {
6240 gk20a_err(dev_from_gk20a(g), "sm did not lock down!\n"); 6238 nvgpu_err(g, "sm did not lock down!\n");
6241 return ret; 6239 return ret;
6242 } 6240 }
6243 } 6241 }
@@ -6389,7 +6387,6 @@ static int gk20a_gr_post_bpt_events(struct gk20a *g, struct channel_gk20a *ch,
6389 6387
6390int gk20a_gr_isr(struct gk20a *g) 6388int gk20a_gr_isr(struct gk20a *g)
6391{ 6389{
6392 struct device *dev = dev_from_gk20a(g);
6393 struct gr_gk20a_isr_data isr_data; 6390 struct gr_gk20a_isr_data isr_data;
6394 u32 grfifo_ctl; 6391 u32 grfifo_ctl;
6395 u32 obj_table; 6392 u32 obj_table;
@@ -6520,14 +6517,14 @@ int gk20a_gr_isr(struct gk20a *g)
6520 6517
6521 if (exception & gr_exception_fe_m()) { 6518 if (exception & gr_exception_fe_m()) {
6522 u32 fe = gk20a_readl(g, gr_fe_hww_esr_r()); 6519 u32 fe = gk20a_readl(g, gr_fe_hww_esr_r());
6523 gk20a_err(dev, "fe warning %08x", fe); 6520 nvgpu_err(g, "fe warning %08x", fe);
6524 gk20a_writel(g, gr_fe_hww_esr_r(), fe); 6521 gk20a_writel(g, gr_fe_hww_esr_r(), fe);
6525 need_reset |= -EFAULT; 6522 need_reset |= -EFAULT;
6526 } 6523 }
6527 6524
6528 if (exception & gr_exception_memfmt_m()) { 6525 if (exception & gr_exception_memfmt_m()) {
6529 u32 memfmt = gk20a_readl(g, gr_memfmt_hww_esr_r()); 6526 u32 memfmt = gk20a_readl(g, gr_memfmt_hww_esr_r());
6530 gk20a_err(dev, "memfmt exception %08x", memfmt); 6527 nvgpu_err(g, "memfmt exception %08x", memfmt);
6531 gk20a_writel(g, gr_memfmt_hww_esr_r(), memfmt); 6528 gk20a_writel(g, gr_memfmt_hww_esr_r(), memfmt);
6532 need_reset |= -EFAULT; 6529 need_reset |= -EFAULT;
6533 } 6530 }
@@ -6556,7 +6553,7 @@ int gk20a_gr_isr(struct gk20a *g)
6556 6553
6557 if (exception & gr_exception_ds_m()) { 6554 if (exception & gr_exception_ds_m()) {
6558 u32 ds = gk20a_readl(g, gr_ds_hww_esr_r()); 6555 u32 ds = gk20a_readl(g, gr_ds_hww_esr_r());
6559 gk20a_err(dev, "ds exception %08x", ds); 6556 nvgpu_err(g, "ds exception %08x", ds);
6560 gk20a_writel(g, gr_ds_hww_esr_r(), ds); 6557 gk20a_writel(g, gr_ds_hww_esr_r(), ds);
6561 need_reset |= -EFAULT; 6558 need_reset |= -EFAULT;
6562 } 6559 }
@@ -6565,7 +6562,7 @@ int gk20a_gr_isr(struct gk20a *g)
6565 gr_intr &= ~gr_intr_exception_pending_f(); 6562 gr_intr &= ~gr_intr_exception_pending_f();
6566 6563
6567 if (need_reset) { 6564 if (need_reset) {
6568 gk20a_err(dev, "set gr exception notifier"); 6565 nvgpu_err(g, "set gr exception notifier");
6569 gk20a_gr_set_error_notifier(g, &isr_data, 6566 gk20a_gr_set_error_notifier(g, &isr_data,
6570 NVGPU_CHANNEL_GR_EXCEPTION); 6567 NVGPU_CHANNEL_GR_EXCEPTION);
6571 } 6568 }
@@ -6586,7 +6583,7 @@ int gk20a_gr_isr(struct gk20a *g)
6586 if (gr_intr && !ch) { 6583 if (gr_intr && !ch) {
6587 /* Clear interrupts for unused channel. This is 6584 /* Clear interrupts for unused channel. This is
6588 probably an interrupt during gk20a_free_channel() */ 6585 probably an interrupt during gk20a_free_channel() */
6589 gk20a_err(dev_from_gk20a(g), 6586 nvgpu_err(g,
6590 "unhandled gr interrupt 0x%08x for unreferenceable channel, clearing", 6587 "unhandled gr interrupt 0x%08x for unreferenceable channel, clearing",
6591 gr_intr); 6588 gr_intr);
6592 gk20a_writel(g, gr_intr_r(), gr_intr); 6589 gk20a_writel(g, gr_intr_r(), gr_intr);
@@ -6598,7 +6595,7 @@ int gk20a_gr_isr(struct gk20a *g)
6598 gr_gpfifo_ctl_semaphore_access_f(1)); 6595 gr_gpfifo_ctl_semaphore_access_f(1));
6599 6596
6600 if (gr_intr) 6597 if (gr_intr)
6601 gk20a_err(dev_from_gk20a(g), 6598 nvgpu_err(g,
6602 "unhandled gr interrupt 0x%08x", gr_intr); 6599 "unhandled gr interrupt 0x%08x", gr_intr);
6603 6600
6604 /* Posting of BPT events should be the last thing in this function */ 6601 /* Posting of BPT events should be the last thing in this function */
@@ -7330,13 +7327,13 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
7330 context = (u8 *)context_buffer; 7327 context = (u8 *)context_buffer;
7331 /* sanity check main header */ 7328 /* sanity check main header */
7332 if (!check_main_image_header_magic(context)) { 7329 if (!check_main_image_header_magic(context)) {
7333 gk20a_err(dev_from_gk20a(g), 7330 nvgpu_err(g,
7334 "Invalid main header: magic value"); 7331 "Invalid main header: magic value");
7335 return -EINVAL; 7332 return -EINVAL;
7336 } 7333 }
7337 num_gpcs = *(u32 *)(context + ctxsw_prog_main_image_num_gpcs_o()); 7334 num_gpcs = *(u32 *)(context + ctxsw_prog_main_image_num_gpcs_o());
7338 if (gpc_num >= num_gpcs) { 7335 if (gpc_num >= num_gpcs) {
7339 gk20a_err(dev_from_gk20a(g), 7336 nvgpu_err(g,
7340 "GPC 0x%08x is greater than total count 0x%08x!\n", 7337 "GPC 0x%08x is greater than total count 0x%08x!\n",
7341 gpc_num, num_gpcs); 7338 gpc_num, num_gpcs);
7342 return -EINVAL; 7339 return -EINVAL;
@@ -7357,7 +7354,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
7357 /* check local header magic */ 7354 /* check local header magic */
7358 context += ctxsw_prog_ucode_header_size_in_bytes(); 7355 context += ctxsw_prog_ucode_header_size_in_bytes();
7359 if (!check_local_header_magic(context)) { 7356 if (!check_local_header_magic(context)) {
7360 gk20a_err(dev_from_gk20a(g), 7357 nvgpu_err(g,
7361 "Invalid local header: magic value\n"); 7358 "Invalid local header: magic value\n");
7362 return -EINVAL; 7359 return -EINVAL;
7363 } 7360 }
@@ -7388,7 +7385,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
7388 (sm_dsm_perf_regs[sm_dsm_perf_reg_id] & tpc_gpc_mask); 7385 (sm_dsm_perf_regs[sm_dsm_perf_reg_id] & tpc_gpc_mask);
7389 7386
7390 if (chk_addr != addr) { 7387 if (chk_addr != addr) {
7391 gk20a_err(dev_from_gk20a(g), 7388 nvgpu_err(g,
7392 "Oops addr miss-match! : 0x%08x != 0x%08x\n", 7389 "Oops addr miss-match! : 0x%08x != 0x%08x\n",
7393 addr, chk_addr); 7390 addr, chk_addr);
7394 return -EINVAL; 7391 return -EINVAL;
@@ -7419,7 +7416,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
7419 tpc_gpc_mask); 7416 tpc_gpc_mask);
7420 7417
7421 if (chk_addr != addr) { 7418 if (chk_addr != addr) {
7422 gk20a_err(dev_from_gk20a(g), 7419 nvgpu_err(g,
7423 "Oops addr miss-match! : 0x%08x != 0x%08x\n", 7420 "Oops addr miss-match! : 0x%08x != 0x%08x\n",
7424 addr, chk_addr); 7421 addr, chk_addr);
7425 return -EINVAL; 7422 return -EINVAL;
@@ -7488,7 +7485,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
7488 /* last sanity check: did we somehow compute an offset outside the 7485 /* last sanity check: did we somehow compute an offset outside the
7489 * extended buffer? */ 7486 * extended buffer? */
7490 if (offset_to_segment > offset_to_segment_end) { 7487 if (offset_to_segment > offset_to_segment_end) {
7491 gk20a_err(dev_from_gk20a(g), 7488 nvgpu_err(g,
7492 "Overflow ctxsw buffer! 0x%08x > 0x%08x\n", 7489 "Overflow ctxsw buffer! 0x%08x > 0x%08x\n",
7493 offset_to_segment, offset_to_segment_end); 7490 offset_to_segment, offset_to_segment_end);
7494 return -EINVAL; 7491 return -EINVAL;
@@ -7680,7 +7677,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
7680 7677
7681 context = (u8 *)context_buffer; 7678 context = (u8 *)context_buffer;
7682 if (!check_main_image_header_magic(context)) { 7679 if (!check_main_image_header_magic(context)) {
7683 gk20a_err(dev_from_gk20a(g), 7680 nvgpu_err(g,
7684 "Invalid main header: magic value"); 7681 "Invalid main header: magic value");
7685 return -EINVAL; 7682 return -EINVAL;
7686 } 7683 }
@@ -7689,7 +7686,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
7689 /* Parse the FECS local header. */ 7686 /* Parse the FECS local header. */
7690 context += ctxsw_prog_ucode_header_size_in_bytes(); 7687 context += ctxsw_prog_ucode_header_size_in_bytes();
7691 if (!check_local_header_magic(context)) { 7688 if (!check_local_header_magic(context)) {
7692 gk20a_err(dev_from_gk20a(g), 7689 nvgpu_err(g,
7693 "Invalid FECS local header: magic value\n"); 7690 "Invalid FECS local header: magic value\n");
7694 return -EINVAL; 7691 return -EINVAL;
7695 } 7692 }
@@ -7724,7 +7721,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
7724 } 7721 }
7725 7722
7726 if ((gpc_num + 1) > num_gpcs) { 7723 if ((gpc_num + 1) > num_gpcs) {
7727 gk20a_err(dev_from_gk20a(g), 7724 nvgpu_err(g,
7728 "GPC %d not in this context buffer.\n", 7725 "GPC %d not in this context buffer.\n",
7729 gpc_num); 7726 gpc_num);
7730 return -EINVAL; 7727 return -EINVAL;
@@ -7734,7 +7731,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
7734 for (i = 0; i < num_gpcs; i++) { 7731 for (i = 0; i < num_gpcs; i++) {
7735 context += ctxsw_prog_ucode_header_size_in_bytes(); 7732 context += ctxsw_prog_ucode_header_size_in_bytes();
7736 if (!check_local_header_magic(context)) { 7733 if (!check_local_header_magic(context)) {
7737 gk20a_err(dev_from_gk20a(g), 7734 nvgpu_err(g,
7738 "Invalid GPCCS local header: magic value\n"); 7735 "Invalid GPCCS local header: magic value\n");
7739 return -EINVAL; 7736 return -EINVAL;
7740 7737
@@ -7751,7 +7748,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
7751 num_tpcs = *(u32 *)(context + ctxsw_prog_local_image_num_tpcs_o()); 7748 num_tpcs = *(u32 *)(context + ctxsw_prog_local_image_num_tpcs_o());
7752 7749
7753 if ((i == gpc_num) && ((tpc_num + 1) > num_tpcs)) { 7750 if ((i == gpc_num) && ((tpc_num + 1) > num_tpcs)) {
7754 gk20a_err(dev_from_gk20a(g), 7751 nvgpu_err(g,
7755 "GPC %d TPC %d not in this context buffer.\n", 7752 "GPC %d TPC %d not in this context buffer.\n",
7756 gpc_num, tpc_num); 7753 gpc_num, tpc_num);
7757 return -EINVAL; 7754 return -EINVAL;
@@ -8159,7 +8156,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
8159 goto cleanup; 8156 goto cleanup;
8160 8157
8161 if (offset > hwpm_ctxsw_buffer_size) { 8158 if (offset > hwpm_ctxsw_buffer_size) {
8162 gk20a_err(dev_from_gk20a(g), "offset > buffer size"); 8159 nvgpu_err(g, "offset > buffer size");
8163 goto cleanup; 8160 goto cleanup;
8164 } 8161 }
8165 8162
@@ -8175,7 +8172,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
8175 8172
8176 return 0; 8173 return 0;
8177cleanup: 8174cleanup:
8178 gk20a_err(dev_from_gk20a(g), "Failed to create HWPM buffer offset map"); 8175 nvgpu_err(g, "Failed to create HWPM buffer offset map");
8179 nvgpu_big_free(g, map); 8176 nvgpu_big_free(g, map);
8180 return -EINVAL; 8177 return -EINVAL;
8181} 8178}
@@ -8213,7 +8210,7 @@ static int gr_gk20a_find_priv_offset_in_pm_buffer(struct gk20a *g,
8213 if (result) 8210 if (result)
8214 *priv_offset = result->offset; 8211 *priv_offset = result->offset;
8215 else { 8212 else {
8216 gk20a_err(dev_from_gk20a(g), "Lookup failed for address 0x%x", addr); 8213 nvgpu_err(g, "Lookup failed for address 0x%x", addr);
8217 err = -EINVAL; 8214 err = -EINVAL;
8218 } 8215 }
8219 return err; 8216 return err;
@@ -8278,7 +8275,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
8278 */ 8275 */
8279 err = gr_gk20a_disable_ctxsw(g); 8276 err = gr_gk20a_disable_ctxsw(g);
8280 if (err) { 8277 if (err) {
8281 gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw"); 8278 nvgpu_err(g, "unable to stop gr ctxsw");
8282 /* this should probably be ctx-fatal... */ 8279 /* this should probably be ctx-fatal... */
8283 goto cleanup; 8280 goto cleanup;
8284 } 8281 }
@@ -8418,7 +8415,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
8418 if (!pm_ctx_ready) { 8415 if (!pm_ctx_ready) {
8419 /* Make sure ctx buffer was initialized */ 8416 /* Make sure ctx buffer was initialized */
8420 if (!ch_ctx->pm_ctx.mem.pages) { 8417 if (!ch_ctx->pm_ctx.mem.pages) {
8421 gk20a_err(dev_from_gk20a(g), 8418 nvgpu_err(g,
8422 "Invalid ctx buffer"); 8419 "Invalid ctx buffer");
8423 err = -EINVAL; 8420 err = -EINVAL;
8424 goto cleanup; 8421 goto cleanup;
@@ -8515,7 +8512,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
8515 if (restart_gr_ctxsw) { 8512 if (restart_gr_ctxsw) {
8516 int tmp_err = gr_gk20a_enable_ctxsw(g); 8513 int tmp_err = gr_gk20a_enable_ctxsw(g);
8517 if (tmp_err) { 8514 if (tmp_err) {
8518 gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n"); 8515 nvgpu_err(g, "unable to restart ctxsw!\n");
8519 err = tmp_err; 8516 err = tmp_err;
8520 } 8517 }
8521 } 8518 }
@@ -8659,7 +8656,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
8659 * enabled, the sm will never lock down. */ 8656 * enabled, the sm will never lock down. */
8660 if (!mmu_debug_mode_enabled && 8657 if (!mmu_debug_mode_enabled &&
8661 (g->ops.mm.mmu_fault_pending(g))) { 8658 (g->ops.mm.mmu_fault_pending(g))) {
8662 gk20a_err(dev_from_gk20a(g), 8659 nvgpu_err(g,
8663 "GPC%d TPC%d: mmu fault pending," 8660 "GPC%d TPC%d: mmu fault pending,"
8664 " sm will never lock down!", gpc, tpc); 8661 " sm will never lock down!", gpc, tpc);
8665 return -EFAULT; 8662 return -EFAULT;
@@ -8684,9 +8681,9 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
8684 warps_trapped = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_1_r() + offset) << 32; 8681 warps_trapped = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_1_r() + offset) << 32;
8685 warps_trapped |= gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_r() + offset); 8682 warps_trapped |= gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_r() + offset);
8686 8683
8687 gk20a_err(dev_from_gk20a(g), 8684 nvgpu_err(g,
8688 "GPC%d TPC%d: timed out while trying to lock down SM", gpc, tpc); 8685 "GPC%d TPC%d: timed out while trying to lock down SM", gpc, tpc);
8689 gk20a_err(dev_from_gk20a(g), 8686 nvgpu_err(g,
8690 "STATUS0(0x%x)=0x%x CONTROL0=0x%x VALID_MASK=0x%llx PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n", 8687 "STATUS0(0x%x)=0x%x CONTROL0=0x%x VALID_MASK=0x%llx PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n",
8691 gr_gpc0_tpc0_sm_dbgr_status0_r() + offset, dbgr_status0, dbgr_control0, 8688 gr_gpc0_tpc0_sm_dbgr_status0_r() + offset, dbgr_status0, dbgr_control0,
8692 warps_valid, warps_paused, warps_trapped); 8689 warps_valid, warps_paused, warps_trapped);
@@ -8707,7 +8704,7 @@ void gk20a_suspend_single_sm(struct gk20a *g,
8707 8704
8708 /* if an SM debugger isn't attached, skip suspend */ 8705 /* if an SM debugger isn't attached, skip suspend */
8709 if (!gk20a_gr_sm_debugger_attached(g)) { 8706 if (!gk20a_gr_sm_debugger_attached(g)) {
8710 gk20a_err(dev_from_gk20a(g), 8707 nvgpu_err(g,
8711 "SM debugger not attached, skipping suspend!\n"); 8708 "SM debugger not attached, skipping suspend!\n");
8712 return; 8709 return;
8713 } 8710 }
@@ -8722,7 +8719,7 @@ void gk20a_suspend_single_sm(struct gk20a *g,
8722 err = gk20a_gr_wait_for_sm_lock_down(g, gpc, tpc, 8719 err = gk20a_gr_wait_for_sm_lock_down(g, gpc, tpc,
8723 global_esr_mask, check_errors); 8720 global_esr_mask, check_errors);
8724 if (err) { 8721 if (err) {
8725 gk20a_err(dev_from_gk20a(g), 8722 nvgpu_err(g,
8726 "SuspendSm failed\n"); 8723 "SuspendSm failed\n");
8727 return; 8724 return;
8728 } 8725 }
@@ -8738,7 +8735,7 @@ void gk20a_suspend_all_sms(struct gk20a *g,
8738 8735
8739 /* if an SM debugger isn't attached, skip suspend */ 8736 /* if an SM debugger isn't attached, skip suspend */
8740 if (!gk20a_gr_sm_debugger_attached(g)) { 8737 if (!gk20a_gr_sm_debugger_attached(g)) {
8741 gk20a_err(dev_from_gk20a(g), 8738 nvgpu_err(g,
8742 "SM debugger not attached, skipping suspend!\n"); 8739 "SM debugger not attached, skipping suspend!\n");
8743 return; 8740 return;
8744 } 8741 }
@@ -8759,7 +8756,7 @@ void gk20a_suspend_all_sms(struct gk20a *g,
8759 gk20a_gr_wait_for_sm_lock_down(g, gpc, tpc, 8756 gk20a_gr_wait_for_sm_lock_down(g, gpc, tpc,
8760 global_esr_mask, check_errors); 8757 global_esr_mask, check_errors);
8761 if (err) { 8758 if (err) {
8762 gk20a_err(dev_from_gk20a(g), 8759 nvgpu_err(g,
8763 "SuspendAllSms failed\n"); 8760 "SuspendAllSms failed\n");
8764 return; 8761 return;
8765 } 8762 }
@@ -9068,7 +9065,7 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g,
9068 9065
9069 err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0); 9066 err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0);
9070 if (err) 9067 if (err)
9071 gk20a_err(dev_from_gk20a(g), "Failed to access register\n"); 9068 nvgpu_err(g, "Failed to access register\n");
9072 nvgpu_kfree(g, ops); 9069 nvgpu_kfree(g, ops);
9073 return err; 9070 return err;
9074} 9071}
@@ -9188,7 +9185,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
9188 9185
9189 err = gr_gk20a_disable_ctxsw(g); 9186 err = gr_gk20a_disable_ctxsw(g);
9190 if (err) { 9187 if (err) {
9191 gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw"); 9188 nvgpu_err(g, "unable to stop gr ctxsw");
9192 goto clean_up; 9189 goto clean_up;
9193 } 9190 }
9194 9191
@@ -9206,7 +9203,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
9206 9203
9207 err = gr_gk20a_enable_ctxsw(g); 9204 err = gr_gk20a_enable_ctxsw(g);
9208 if (err) 9205 if (err)
9209 gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n"); 9206 nvgpu_err(g, "unable to restart ctxsw!\n");
9210 9207
9211 *ctx_resident_ch_fd = local_ctx_resident_ch_fd; 9208 *ctx_resident_ch_fd = local_ctx_resident_ch_fd;
9212 9209
@@ -9230,7 +9227,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
9230 9227
9231 err = gr_gk20a_disable_ctxsw(g); 9228 err = gr_gk20a_disable_ctxsw(g);
9232 if (err) { 9229 if (err) {
9233 gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw"); 9230 nvgpu_err(g, "unable to stop gr ctxsw");
9234 goto clean_up; 9231 goto clean_up;
9235 } 9232 }
9236 9233
@@ -9244,7 +9241,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
9244 9241
9245 err = gr_gk20a_enable_ctxsw(g); 9242 err = gr_gk20a_enable_ctxsw(g);
9246 if (err) 9243 if (err)
9247 gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n"); 9244 nvgpu_err(g, "unable to restart ctxsw!\n");
9248 9245
9249 *ctx_resident_ch_fd = local_ctx_resident_ch_fd; 9246 *ctx_resident_ch_fd = local_ctx_resident_ch_fd;
9250 9247
@@ -9308,7 +9305,7 @@ int gr_gk20a_inval_icache(struct gk20a *g, struct channel_gk20a *ch)
9308 9305
9309 err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 0, 1); 9306 err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 0, 1);
9310 if (err) { 9307 if (err) {
9311 gk20a_err(dev_from_gk20a(g), "Failed to read register"); 9308 nvgpu_err(g, "Failed to read register");
9312 return err; 9309 return err;
9313 } 9310 }
9314 9311
@@ -9318,7 +9315,7 @@ int gr_gk20a_inval_icache(struct gk20a *g, struct channel_gk20a *ch)
9318 ops.value_lo = set_field(regval, gr_pri_gpcs_gcc_dbg_invalidate_m(), 1); 9315 ops.value_lo = set_field(regval, gr_pri_gpcs_gcc_dbg_invalidate_m(), 1);
9319 err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 1, 0); 9316 err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 1, 0);
9320 if (err) { 9317 if (err) {
9321 gk20a_err(dev_from_gk20a(g), "Failed to write register"); 9318 nvgpu_err(g, "Failed to write register");
9322 return err; 9319 return err;
9323 } 9320 }
9324 9321
@@ -9326,7 +9323,7 @@ int gr_gk20a_inval_icache(struct gk20a *g, struct channel_gk20a *ch)
9326 ops.offset = gr_pri_gpc0_tpc0_sm_cache_control_r(); 9323 ops.offset = gr_pri_gpc0_tpc0_sm_cache_control_r();
9327 err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 0, 1); 9324 err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 0, 1);
9328 if (err) { 9325 if (err) {
9329 gk20a_err(dev_from_gk20a(g), "Failed to read register"); 9326 nvgpu_err(g, "Failed to read register");
9330 return err; 9327 return err;
9331 } 9328 }
9332 9329
@@ -9380,7 +9377,7 @@ int gr_gk20a_wait_for_pause(struct gk20a *g, struct warpstate *w_state)
9380 err = gk20a_gr_lock_down_sm(g, gpc, tpc, global_mask, false); 9377 err = gk20a_gr_lock_down_sm(g, gpc, tpc, global_mask, false);
9381 9378
9382 if (err) { 9379 if (err) {
9383 gk20a_err(dev_from_gk20a(g), "sm did not lock down!\n"); 9380 nvgpu_err(g, "sm did not lock down!");
9384 return err; 9381 return err;
9385 } 9382 }
9386 } 9383 }
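The gr_gk20a.c hunks above are all instances of one mechanical substitution. Purely as an illustration (the wrapper function name below is invented; gk20a_err(), dev_from_gk20a() and nvgpu_err() are the symbols the hunks actually touch), the before/after shape is:

	/* sketch only, not part of the patch */
	static void example_report_gr_fault(struct gk20a *g, u32 gr_intr)
	{
		/* before: tied to Linux via struct device
		 * gk20a_err(dev_from_gk20a(g),
		 *	"unhandled gr interrupt 0x%08x", gr_intr);
		 */

		/* after: takes the OS-agnostic struct gk20a directly */
		nvgpu_err(g, "unhandled gr interrupt 0x%08x", gr_intr);
	}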
diff --git a/drivers/gpu/nvgpu/gk20a/hal.c b/drivers/gpu/nvgpu/gk20a/hal.c
index dc4fcf1c..bbde885f 100644
--- a/drivers/gpu/nvgpu/gk20a/hal.c
+++ b/drivers/gpu/nvgpu/gk20a/hal.c
@@ -23,6 +23,8 @@
23#include "nvgpu_gpuid_t19x.h" 23#include "nvgpu_gpuid_t19x.h"
24#endif 24#endif
25 25
26#include <nvgpu/log.h>
27
26int gpu_init_hal(struct gk20a *g) 28int gpu_init_hal(struct gk20a *g)
27{ 29{
28 u32 ver = g->gpu_characteristics.arch + g->gpu_characteristics.impl; 30 u32 ver = g->gpu_characteristics.arch + g->gpu_characteristics.impl;
@@ -54,7 +56,7 @@ int gpu_init_hal(struct gk20a *g)
54 break; 56 break;
55#endif 57#endif
56 default: 58 default:
57 gk20a_err(g->dev, "no support for %x", ver); 59 nvgpu_err(g, "no support for %x", ver);
58 return -ENODEV; 60 return -ENODEV;
59 } 61 }
60 62
diff --git a/drivers/gpu/nvgpu/gk20a/hal_gk20a.c b/drivers/gpu/nvgpu/gk20a/hal_gk20a.c
index 7a13ed9c..00d57022 100644
--- a/drivers/gpu/nvgpu/gk20a/hal_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/hal_gk20a.c
@@ -35,6 +35,8 @@
35#include "css_gr_gk20a.h" 35#include "css_gr_gk20a.h"
36#include "pramin_gk20a.h" 36#include "pramin_gk20a.h"
37 37
38#include <nvgpu/log.h>
39
38#include <nvgpu/hw/gk20a/hw_proj_gk20a.h> 40#include <nvgpu/hw/gk20a/hw_proj_gk20a.h>
39 41
40static struct gpu_ops gk20a_ops = { 42static struct gpu_ops gk20a_ops = {
@@ -132,7 +134,7 @@ static int gk20a_get_litter_value(struct gk20a *g, int value)
132 ret = 0; 134 ret = 0;
133 break; 135 break;
134 default: 136 default:
135 gk20a_err(dev_from_gk20a(g), "Missing definition %d", value); 137 nvgpu_err(g, "Missing definition %d", value);
136 BUG(); 138 BUG();
137 break; 139 break;
138 } 140 }
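Both HAL files now pull in <nvgpu/log.h> for the new logging entry points, but the diff never shows how nvgpu_err()/nvgpu_warn() are defined. Purely as a mental model (an assumption, not the real header), a Linux-side implementation could be as thin as:

	/* hypothetical sketch -- the actual definitions live in <nvgpu/log.h> */
	#define nvgpu_err(g, fmt, args...) \
		dev_err(dev_from_gk20a(g), "%s: " fmt "\n", __func__, ##args)

	#define nvgpu_warn(g, fmt, args...) \
		dev_warn(dev_from_gk20a(g), "%s: " fmt "\n", __func__, ##args)

Keeping struct device behind such a wrapper is what lets other operating systems supply their own back end while callers only ever pass the struct gk20a.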
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
index 1b9d515c..f8416d55 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
@@ -20,6 +20,7 @@
20 20
21#include <trace/events/gk20a.h> 21#include <trace/events/gk20a.h>
22#include <nvgpu/timers.h> 22#include <nvgpu/timers.h>
23#include <nvgpu/log.h>
23 24
24#include "gk20a.h" 25#include "gk20a.h"
25 26
@@ -160,8 +161,7 @@ static int gk20a_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
160 } while (!nvgpu_timeout_expired(&timeout)); 161 } while (!nvgpu_timeout_expired(&timeout));
161 162
162 if (nvgpu_timeout_peek_expired(&timeout)) { 163 if (nvgpu_timeout_peek_expired(&timeout)) {
163 gk20a_err(dev_from_gk20a(g), 164 nvgpu_err(g, "comp tag clear timeout");
164 "comp tag clear timeout\n");
165 err = -EBUSY; 165 err = -EBUSY;
166 goto out; 166 goto out;
167 } 167 }
@@ -186,7 +186,7 @@ static void gk20a_ltc_isr(struct gk20a *g)
186 u32 intr; 186 u32 intr;
187 187
188 intr = gk20a_readl(g, ltc_ltc0_ltss_intr_r()); 188 intr = gk20a_readl(g, ltc_ltc0_ltss_intr_r());
189 gk20a_err(dev_from_gk20a(g), "ltc: %08x\n", intr); 189 nvgpu_err(g, "ltc: %08x\n", intr);
190 gk20a_writel(g, ltc_ltc0_ltss_intr_r(), intr); 190 gk20a_writel(g, ltc_ltc0_ltss_intr_r(), intr);
191} 191}
192 192
@@ -215,7 +215,7 @@ static int gk20a_determine_L2_size_bytes(struct gk20a *g)
215 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) { 215 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) {
216 sets = 16; 216 sets = 16;
217 } else { 217 } else {
218 dev_err(dev_from_gk20a(g), 218 nvgpu_err(g,
219 "Unknown constant %u for active sets", 219 "Unknown constant %u for active sets",
220 (unsigned)active_sets_value); 220 (unsigned)active_sets_value);
221 sets = 0; 221 sets = 0;
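The ltc_gk20a.c hunk also shows the idiom these messages sit in: poll under an nvgpu_timeout, then report once on expiry, with the trailing "\n" dropped from the message (the new macro is assumed to terminate the line itself). A self-contained sketch of that shape, with an invented function name, a caller-supplied poll callback so no real register accessors are implied, and nvgpu_timeout_init() arguments assumed rather than taken from this diff:

	static int example_poll_comptag_clear(struct gk20a *g,
					bool (*clear_done)(struct gk20a *g))
	{
		struct nvgpu_timeout timeout;

		/* duration/flag values are placeholders for the sketch */
		nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);

		do {
			if (clear_done(g))
				return 0;
		} while (!nvgpu_timeout_expired(&timeout));

		if (nvgpu_timeout_peek_expired(&timeout)) {
			nvgpu_err(g, "comp tag clear timeout");
			return -EBUSY;
		}
		return 0;
	}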
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index ab3dc3f9..78332ee7 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -39,6 +39,7 @@
39#include <nvgpu/allocator.h> 39#include <nvgpu/allocator.h>
40#include <nvgpu/semaphore.h> 40#include <nvgpu/semaphore.h>
41#include <nvgpu/page_allocator.h> 41#include <nvgpu/page_allocator.h>
42#include <nvgpu/log.h>
42 43
43#include "gk20a.h" 44#include "gk20a.h"
44#include "mm_gk20a.h" 45#include "mm_gk20a.h"
@@ -536,7 +537,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g)
536 0, 537 0,
537 NULL); 538 NULL);
538 if (err) { 539 if (err) {
539 gk20a_err(g->dev, 540 nvgpu_err(g,
540 "Failed to clear vidmem region 1 : %d", err); 541 "Failed to clear vidmem region 1 : %d", err);
541 return err; 542 return err;
542 } 543 }
@@ -555,7 +556,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g)
555 0, 556 0,
556 &gk20a_fence_out); 557 &gk20a_fence_out);
557 if (err) { 558 if (err) {
558 gk20a_err(g->dev, 559 nvgpu_err(g,
559 "Failed to clear vidmem region 2 : %d", err); 560 "Failed to clear vidmem region 2 : %d", err);
560 return err; 561 return err;
561 } 562 }
@@ -575,7 +576,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g)
575 576
576 gk20a_fence_put(gk20a_fence_out); 577 gk20a_fence_put(gk20a_fence_out);
577 if (err) { 578 if (err) {
578 gk20a_err(g->dev, 579 nvgpu_err(g,
579 "fence wait failed for CE execute ops"); 580 "fence wait failed for CE execute ops");
580 return err; 581 return err;
581 } 582 }
@@ -591,7 +592,6 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
591{ 592{
592#if defined(CONFIG_GK20A_VIDMEM) 593#if defined(CONFIG_GK20A_VIDMEM)
593 struct gk20a *g = mm->g; 594 struct gk20a *g = mm->g;
594 struct device *d = dev_from_gk20a(g);
595 size_t size = g->ops.mm.get_vidmem_size ? 595 size_t size = g->ops.mm.get_vidmem_size ?
596 g->ops.mm.get_vidmem_size(g) : 0; 596 g->ops.mm.get_vidmem_size(g) : 0;
597 u64 bootstrap_base, bootstrap_size, base; 597 u64 bootstrap_base, bootstrap_size, base;
@@ -625,7 +625,7 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
625 default_page_size, 625 default_page_size,
626 GPU_ALLOC_4K_VIDMEM_PAGES); 626 GPU_ALLOC_4K_VIDMEM_PAGES);
627 if (err) { 627 if (err) {
628 gk20a_err(d, "Failed to register vidmem for size %zu: %d", 628 nvgpu_err(g, "Failed to register vidmem for size %zu: %d",
629 size, err); 629 size, err);
630 return err; 630 return err;
631 } 631 }
@@ -796,7 +796,7 @@ void gk20a_init_mm_ce_context(struct gk20a *g)
796 NULL); 796 NULL);
797 797
798 if (g->mm.vidmem.ce_ctx_id == (u32)~0) 798 if (g->mm.vidmem.ce_ctx_id == (u32)~0)
799 gk20a_err(g->dev, 799 nvgpu_err(g,
800 "Failed to allocate CE context for vidmem page clearing support"); 800 "Failed to allocate CE context for vidmem page clearing support");
801 } 801 }
802#endif 802#endif
@@ -882,7 +882,6 @@ static void unmap_gmmu_phys_pages(struct gk20a_mm_entry *entry)
882static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order, 882static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
883 struct gk20a_mm_entry *entry) 883 struct gk20a_mm_entry *entry)
884{ 884{
885 struct device *d = dev_from_vm(vm);
886 struct gk20a *g = gk20a_from_vm(vm); 885 struct gk20a *g = gk20a_from_vm(vm);
887 u32 num_pages = 1 << order; 886 u32 num_pages = 1 << order;
888 u32 len = num_pages * PAGE_SIZE; 887 u32 len = num_pages * PAGE_SIZE;
@@ -905,7 +904,7 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
905 904
906 905
907 if (err) { 906 if (err) {
908 gk20a_err(d, "memory allocation failed"); 907 nvgpu_err(g, "memory allocation failed");
909 return -ENOMEM; 908 return -ENOMEM;
910 } 909 }
911 910
@@ -1209,7 +1208,7 @@ void gk20a_vm_put_buffers(struct vm_gk20a *vm,
1209static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset, 1208static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
1210 struct vm_gk20a_mapping_batch *batch) 1209 struct vm_gk20a_mapping_batch *batch)
1211{ 1210{
1212 struct device *d = dev_from_vm(vm); 1211 struct gk20a *g = vm->mm->g;
1213 struct mapped_buffer_node *mapped_buffer; 1212 struct mapped_buffer_node *mapped_buffer;
1214 1213
1215 nvgpu_mutex_acquire(&vm->update_gmmu_lock); 1214 nvgpu_mutex_acquire(&vm->update_gmmu_lock);
@@ -1217,7 +1216,7 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
1217 mapped_buffer = find_mapped_buffer_locked(vm->mapped_buffers, offset); 1216 mapped_buffer = find_mapped_buffer_locked(vm->mapped_buffers, offset);
1218 if (!mapped_buffer) { 1217 if (!mapped_buffer) {
1219 nvgpu_mutex_release(&vm->update_gmmu_lock); 1218 nvgpu_mutex_release(&vm->update_gmmu_lock);
1220 gk20a_err(d, "invalid addr to unmap 0x%llx", offset); 1219 nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
1221 return; 1220 return;
1222 } 1221 }
1223 1222
@@ -1240,7 +1239,7 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
1240 1239
1241 if (mapped_buffer->user_mapped == 0) { 1240 if (mapped_buffer->user_mapped == 0) {
1242 nvgpu_mutex_release(&vm->update_gmmu_lock); 1241 nvgpu_mutex_release(&vm->update_gmmu_lock);
1243 gk20a_err(d, "addr already unmapped from user 0x%llx", offset); 1242 nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
1244 return; 1243 return;
1245 } 1244 }
1246 1245
@@ -1284,7 +1283,7 @@ u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
1284 1283
1285 offset = nvgpu_alloc(vma, size); 1284 offset = nvgpu_alloc(vma, size);
1286 if (!offset) { 1285 if (!offset) {
1287 gk20a_err(dev_from_vm(vm), 1286 nvgpu_err(vm->mm->g,
1288 "%s oom: sz=0x%llx", vma->name, size); 1287 "%s oom: sz=0x%llx", vma->name, size);
1289 return 0; 1288 return 0;
1290 } 1289 }
@@ -1405,14 +1404,13 @@ static int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
1405{ 1404{
1406 bool kind_compressible; 1405 bool kind_compressible;
1407 struct gk20a *g = gk20a_from_vm(vm); 1406 struct gk20a *g = gk20a_from_vm(vm);
1408 struct device *d = dev_from_gk20a(g);
1409 int ctag_granularity = g->ops.fb.compression_page_size(g); 1407 int ctag_granularity = g->ops.fb.compression_page_size(g);
1410 1408
1411 if (unlikely(bfr->kind_v == gmmu_pte_kind_invalid_v())) 1409 if (unlikely(bfr->kind_v == gmmu_pte_kind_invalid_v()))
1412 bfr->kind_v = gmmu_pte_kind_pitch_v(); 1410 bfr->kind_v = gmmu_pte_kind_pitch_v();
1413 1411
1414 if (unlikely(!gk20a_kind_is_supported(bfr->kind_v))) { 1412 if (unlikely(!gk20a_kind_is_supported(bfr->kind_v))) {
1415 gk20a_err(d, "kind 0x%x not supported", bfr->kind_v); 1413 nvgpu_err(g, "kind 0x%x not supported", bfr->kind_v);
1416 return -EINVAL; 1414 return -EINVAL;
1417 } 1415 }
1418 1416
@@ -1423,7 +1421,7 @@ static int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
1423 bfr->uc_kind_v = gk20a_get_uncompressed_kind(bfr->kind_v); 1421 bfr->uc_kind_v = gk20a_get_uncompressed_kind(bfr->kind_v);
1424 if (unlikely(bfr->uc_kind_v == gmmu_pte_kind_invalid_v())) { 1422 if (unlikely(bfr->uc_kind_v == gmmu_pte_kind_invalid_v())) {
1425 /* shouldn't happen, but it is worth cross-checking */ 1423 /* shouldn't happen, but it is worth cross-checking */
1426 gk20a_err(d, "comptag kind 0x%x can't be" 1424 nvgpu_err(g, "comptag kind 0x%x can't be"
1427 " downgraded to uncompressed kind", 1425 " downgraded to uncompressed kind",
1428 bfr->kind_v); 1426 bfr->kind_v);
1429 return -EINVAL; 1427 return -EINVAL;
@@ -1432,9 +1430,6 @@ static int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
1432 /* comptags only supported for suitable kinds, 128KB pagesize */ 1430 /* comptags only supported for suitable kinds, 128KB pagesize */
1433 if (kind_compressible && 1431 if (kind_compressible &&
1434 vm->gmmu_page_sizes[pgsz_idx] < g->ops.fb.compressible_page_size(g)) { 1432 vm->gmmu_page_sizes[pgsz_idx] < g->ops.fb.compressible_page_size(g)) {
1435 /*
1436 gk20a_warn(d, "comptags specified"
1437 " but pagesize being used doesn't support it");*/
1438 /* it is safe to fall back to uncompressed as 1433 /* it is safe to fall back to uncompressed as
1439 functionality is not harmed */ 1434 functionality is not harmed */
1440 bfr->kind_v = bfr->uc_kind_v; 1435 bfr->kind_v = bfr->uc_kind_v;
@@ -1453,19 +1448,19 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
1453 u64 map_offset, u64 map_size, 1448 u64 map_offset, u64 map_size,
1454 struct vm_reserved_va_node **pva_node) 1449 struct vm_reserved_va_node **pva_node)
1455{ 1450{
1456 struct device *dev = dev_from_vm(vm); 1451 struct gk20a *g = vm->mm->g;
1457 struct vm_reserved_va_node *va_node; 1452 struct vm_reserved_va_node *va_node;
1458 struct mapped_buffer_node *buffer; 1453 struct mapped_buffer_node *buffer;
1459 u64 map_end = map_offset + map_size; 1454 u64 map_end = map_offset + map_size;
1460 1455
1461 /* can wrap around with insane map_size; zero is disallowed too */ 1456 /* can wrap around with insane map_size; zero is disallowed too */
1462 if (map_end <= map_offset) { 1457 if (map_end <= map_offset) {
1463 gk20a_warn(dev, "fixed offset mapping with invalid map_size"); 1458 nvgpu_warn(g, "fixed offset mapping with invalid map_size");
1464 return -EINVAL; 1459 return -EINVAL;
1465 } 1460 }
1466 1461
1467 if (map_offset & (vm->gmmu_page_sizes[bfr->pgsz_idx] - 1)) { 1462 if (map_offset & (vm->gmmu_page_sizes[bfr->pgsz_idx] - 1)) {
1468 gk20a_err(dev, "map offset must be buffer page size aligned 0x%llx", 1463 nvgpu_err(g, "map offset must be buffer page size aligned 0x%llx",
1469 map_offset); 1464 map_offset);
1470 return -EINVAL; 1465 return -EINVAL;
1471 } 1466 }
@@ -1474,13 +1469,13 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
1474 * userspace-managed address spaces */ 1469 * userspace-managed address spaces */
1475 va_node = addr_to_reservation(vm, map_offset); 1470 va_node = addr_to_reservation(vm, map_offset);
1476 if (!va_node && !vm->userspace_managed) { 1471 if (!va_node && !vm->userspace_managed) {
1477 gk20a_warn(dev, "fixed offset mapping without space allocation"); 1472 nvgpu_warn(g, "fixed offset mapping without space allocation");
1478 return -EINVAL; 1473 return -EINVAL;
1479 } 1474 }
1480 1475
1481 /* Mapped area should fit inside va, if there's one */ 1476 /* Mapped area should fit inside va, if there's one */
1482 if (va_node && map_end > va_node->vaddr_start + va_node->size) { 1477 if (va_node && map_end > va_node->vaddr_start + va_node->size) {
1483 gk20a_warn(dev, "fixed offset mapping size overflows va node"); 1478 nvgpu_warn(g, "fixed offset mapping size overflows va node");
1484 return -EINVAL; 1479 return -EINVAL;
1485 } 1480 }
1486 1481
@@ -1490,7 +1485,7 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
1490 buffer = find_mapped_buffer_less_than_locked( 1485 buffer = find_mapped_buffer_less_than_locked(
1491 vm->mapped_buffers, map_offset + map_size); 1486 vm->mapped_buffers, map_offset + map_size);
1492 if (buffer && buffer->addr + buffer->size > map_offset) { 1487 if (buffer && buffer->addr + buffer->size > map_offset) {
1493 gk20a_warn(dev, "overlapping buffer map requested"); 1488 nvgpu_warn(g, "overlapping buffer map requested");
1494 return -EINVAL; 1489 return -EINVAL;
1495 } 1490 }
1496 1491
@@ -1517,7 +1512,6 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
1517{ 1512{
1518 int err = 0; 1513 int err = 0;
1519 bool allocated = false; 1514 bool allocated = false;
1520 struct device *d = dev_from_vm(vm);
1521 struct gk20a *g = gk20a_from_vm(vm); 1515 struct gk20a *g = gk20a_from_vm(vm);
1522 int ctag_granularity = g->ops.fb.compression_page_size(g); 1516 int ctag_granularity = g->ops.fb.compression_page_size(g);
1523 u32 ctag_lines = DIV_ROUND_UP_ULL(size, ctag_granularity); 1517 u32 ctag_lines = DIV_ROUND_UP_ULL(size, ctag_granularity);
@@ -1527,7 +1521,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
1527 map_offset = gk20a_vm_alloc_va(vm, size, 1521 map_offset = gk20a_vm_alloc_va(vm, size,
1528 pgsz_idx); 1522 pgsz_idx);
1529 if (!map_offset) { 1523 if (!map_offset) {
1530 gk20a_err(d, "failed to allocate va space"); 1524 nvgpu_err(g, "failed to allocate va space");
1531 err = -ENOMEM; 1525 err = -ENOMEM;
1532 goto fail_alloc; 1526 goto fail_alloc;
1533 } 1527 }
@@ -1563,7 +1557,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
1563 priv, 1557 priv,
1564 aperture); 1558 aperture);
1565 if (err) { 1559 if (err) {
1566 gk20a_err(d, "failed to update ptes on map"); 1560 nvgpu_err(g, "failed to update ptes on map");
1567 goto fail_validate; 1561 goto fail_validate;
1568 } 1562 }
1569 1563
@@ -1577,7 +1571,7 @@ fail_validate:
1577 if (allocated) 1571 if (allocated)
1578 gk20a_vm_free_va(vm, map_offset, size, pgsz_idx); 1572 gk20a_vm_free_va(vm, map_offset, size, pgsz_idx);
1579fail_alloc: 1573fail_alloc:
1580 gk20a_err(d, "%s: failed with err=%d\n", __func__, err); 1574 nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
1581 return 0; 1575 return 0;
1582} 1576}
1583 1577
@@ -1596,8 +1590,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
1596 if (va_allocated) { 1590 if (va_allocated) {
1597 err = gk20a_vm_free_va(vm, vaddr, size, pgsz_idx); 1591 err = gk20a_vm_free_va(vm, vaddr, size, pgsz_idx);
1598 if (err) { 1592 if (err) {
1599 dev_err(dev_from_vm(vm), 1593 nvgpu_err(g, "failed to free va");
1600 "failed to free va");
1601 return; 1594 return;
1602 } 1595 }
1603 } 1596 }
@@ -1614,8 +1607,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
1614 sparse, 0, 1607 sparse, 0,
1615 APERTURE_INVALID); /* don't care for unmap */ 1608 APERTURE_INVALID); /* don't care for unmap */
1616 if (err) 1609 if (err)
1617 dev_err(dev_from_vm(vm), 1610 nvgpu_err(g, "failed to update gmmu ptes on unmap");
1618 "failed to update gmmu ptes on unmap");
1619 1611
1620 /* flush l2 so any dirty lines are written out *now*. 1612 /* flush l2 so any dirty lines are written out *now*.
1621 * also as we could potentially be switching this buffer 1613 * also as we could potentially be switching this buffer
@@ -1647,7 +1639,7 @@ static enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
1647 } else if (WARN_ON(buf_owner == g && !g->mm.vidmem_is_vidmem)) { 1639 } else if (WARN_ON(buf_owner == g && !g->mm.vidmem_is_vidmem)) {
1648 /* Looks like our video memory, but this gpu doesn't support 1640 /* Looks like our video memory, but this gpu doesn't support
1649 * it. Warn about a bug and bail out */ 1641 * it. Warn about a bug and bail out */
1650 gk20a_warn(dev_from_gk20a(g), 1642 nvgpu_warn(g,
1651 "dmabuf is our vidmem but we don't have local vidmem"); 1643 "dmabuf is our vidmem but we don't have local vidmem");
1652 return APERTURE_INVALID; 1644 return APERTURE_INVALID;
1653 } else if (buf_owner != g) { 1645 } else if (buf_owner != g) {
@@ -1860,7 +1852,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
1860 if (!g->mm.vidmem.cleared) { 1852 if (!g->mm.vidmem.cleared) {
1861 err = gk20a_vidmem_clear_all(g); 1853 err = gk20a_vidmem_clear_all(g);
1862 if (err) { 1854 if (err) {
1863 gk20a_err(g->dev, 1855 nvgpu_err(g,
1864 "failed to clear whole vidmem"); 1856 "failed to clear whole vidmem");
1865 goto err_kfree; 1857 goto err_kfree;
1866 } 1858 }
@@ -2037,7 +2029,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
2037 2029
2038 if (user_mapped && vm->userspace_managed && 2030 if (user_mapped && vm->userspace_managed &&
2039 !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) { 2031 !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) {
2040 gk20a_err(d, 2032 nvgpu_err(g,
2041 "%s: non-fixed-offset mapping not available on userspace managed address spaces", 2033 "%s: non-fixed-offset mapping not available on userspace managed address spaces",
2042 __func__); 2034 __func__);
2043 return -EFAULT; 2035 return -EFAULT;
@@ -2068,7 +2060,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
2068 * track the difference between those two cases we have 2060 * track the difference between those two cases we have
2069 * to fail the mapping when we run out of SMMU space. 2061 * to fail the mapping when we run out of SMMU space.
2070 */ 2062 */
2071 gk20a_warn(d, "oom allocating tracking buffer"); 2063 nvgpu_warn(g, "oom allocating tracking buffer");
2072 goto clean_up; 2064 goto clean_up;
2073 } 2065 }
2074 2066
@@ -2111,7 +2103,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
2111 2103
2112 err = setup_buffer_kind_and_compression(vm, flags, &bfr, bfr.pgsz_idx); 2104 err = setup_buffer_kind_and_compression(vm, flags, &bfr, bfr.pgsz_idx);
2113 if (unlikely(err)) { 2105 if (unlikely(err)) {
2114 gk20a_err(d, "failure setting up kind and compression"); 2106 nvgpu_err(g, "failure setting up kind and compression");
2115 goto clean_up; 2107 goto clean_up;
2116 } 2108 }
2117 2109
@@ -2204,7 +2196,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
2204 /* TBD: check for multiple mapping of same buffer */ 2196 /* TBD: check for multiple mapping of same buffer */
2205 mapped_buffer = nvgpu_kzalloc(g, sizeof(*mapped_buffer)); 2197 mapped_buffer = nvgpu_kzalloc(g, sizeof(*mapped_buffer));
2206 if (!mapped_buffer) { 2198 if (!mapped_buffer) {
2207 gk20a_warn(d, "oom allocating tracking buffer"); 2199 nvgpu_warn(g, "oom allocating tracking buffer");
2208 goto clean_up; 2200 goto clean_up;
2209 } 2201 }
2210 mapped_buffer->dmabuf = dmabuf; 2202 mapped_buffer->dmabuf = dmabuf;
@@ -2230,7 +2222,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
2230 2222
2231 err = insert_mapped_buffer(vm, mapped_buffer); 2223 err = insert_mapped_buffer(vm, mapped_buffer);
2232 if (err) { 2224 if (err) {
2233 gk20a_err(d, "failed to insert into mapped buffer tree"); 2225 nvgpu_err(g, "failed to insert into mapped buffer tree");
2234 goto clean_up; 2226 goto clean_up;
2235 } 2227 }
2236 inserted = true; 2228 inserted = true;
@@ -2274,7 +2266,7 @@ int gk20a_vm_get_compbits_info(struct vm_gk20a *vm,
2274 u32 *flags) 2266 u32 *flags)
2275{ 2267{
2276 struct mapped_buffer_node *mapped_buffer; 2268 struct mapped_buffer_node *mapped_buffer;
2277 struct device *d = dev_from_vm(vm); 2269 struct gk20a *g = vm->mm->g;
2278 2270
2279 nvgpu_mutex_acquire(&vm->update_gmmu_lock); 2271 nvgpu_mutex_acquire(&vm->update_gmmu_lock);
2280 2272
@@ -2283,7 +2275,7 @@ int gk20a_vm_get_compbits_info(struct vm_gk20a *vm,
2283 if (!mapped_buffer || !mapped_buffer->user_mapped) 2275 if (!mapped_buffer || !mapped_buffer->user_mapped)
2284 { 2276 {
2285 nvgpu_mutex_release(&vm->update_gmmu_lock); 2277 nvgpu_mutex_release(&vm->update_gmmu_lock);
2286 gk20a_err(d, "%s: bad offset 0x%llx", __func__, mapping_gva); 2278 nvgpu_err(g, "%s: bad offset 0x%llx", __func__, mapping_gva);
2287 return -EFAULT; 2279 return -EFAULT;
2288 } 2280 }
2289 2281
@@ -2316,19 +2308,18 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
2316{ 2308{
2317 struct mapped_buffer_node *mapped_buffer; 2309 struct mapped_buffer_node *mapped_buffer;
2318 struct gk20a *g = gk20a_from_vm(vm); 2310 struct gk20a *g = gk20a_from_vm(vm);
2319 struct device *d = dev_from_vm(vm);
2320 const bool fixed_mapping = 2311 const bool fixed_mapping =
2321 (flags & NVGPU_AS_MAP_BUFFER_COMPBITS_FLAGS_FIXED_OFFSET) != 0; 2312 (flags & NVGPU_AS_MAP_BUFFER_COMPBITS_FLAGS_FIXED_OFFSET) != 0;
2322 2313
2323 if (vm->userspace_managed && !fixed_mapping) { 2314 if (vm->userspace_managed && !fixed_mapping) {
2324 gk20a_err(d, 2315 nvgpu_err(g,
2325 "%s: non-fixed-offset mapping is not available on userspace managed address spaces", 2316 "%s: non-fixed-offset mapping is not available on userspace managed address spaces",
2326 __func__); 2317 __func__);
2327 return -EFAULT; 2318 return -EFAULT;
2328 } 2319 }
2329 2320
2330 if (fixed_mapping && !vm->userspace_managed) { 2321 if (fixed_mapping && !vm->userspace_managed) {
2331 gk20a_err(d, 2322 nvgpu_err(g,
2332 "%s: fixed-offset mapping is available only on userspace managed address spaces", 2323 "%s: fixed-offset mapping is available only on userspace managed address spaces",
2333 __func__); 2324 __func__);
2334 return -EFAULT; 2325 return -EFAULT;
@@ -2341,13 +2332,13 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
2341 2332
2342 if (!mapped_buffer || !mapped_buffer->user_mapped) { 2333 if (!mapped_buffer || !mapped_buffer->user_mapped) {
2343 nvgpu_mutex_release(&vm->update_gmmu_lock); 2334 nvgpu_mutex_release(&vm->update_gmmu_lock);
2344 gk20a_err(d, "%s: bad offset 0x%llx", __func__, mapping_gva); 2335 nvgpu_err(g, "%s: bad offset 0x%llx", __func__, mapping_gva);
2345 return -EFAULT; 2336 return -EFAULT;
2346 } 2337 }
2347 2338
2348 if (!mapped_buffer->ctags_mappable) { 2339 if (!mapped_buffer->ctags_mappable) {
2349 nvgpu_mutex_release(&vm->update_gmmu_lock); 2340 nvgpu_mutex_release(&vm->update_gmmu_lock);
2350 gk20a_err(d, "%s: comptags not mappable, offset 0x%llx", 2341 nvgpu_err(g, "%s: comptags not mappable, offset 0x%llx",
2351 __func__, mapping_gva); 2342 __func__, mapping_gva);
2352 return -EFAULT; 2343 return -EFAULT;
2353 } 2344 }
@@ -2366,7 +2357,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
2366 2357
2367 if (!mapped_buffer->ctag_map_win_size) { 2358 if (!mapped_buffer->ctag_map_win_size) {
2368 nvgpu_mutex_release(&vm->update_gmmu_lock); 2359 nvgpu_mutex_release(&vm->update_gmmu_lock);
2369 gk20a_err(d, 2360 nvgpu_err(g,
2370 "%s: mapping 0x%llx does not have " 2361 "%s: mapping 0x%llx does not have "
2371 "mappable comptags", 2362 "mappable comptags",
2372 __func__, mapping_gva); 2363 __func__, mapping_gva);
@@ -2402,7 +2393,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
2402 * before before the buffer is 2393 * before before the buffer is
2403 * unmapped */ 2394 * unmapped */
2404 nvgpu_mutex_release(&vm->update_gmmu_lock); 2395 nvgpu_mutex_release(&vm->update_gmmu_lock);
2405 gk20a_err(d, 2396 nvgpu_err(g,
2406 "%s: comptags cannot be mapped into allocated space", 2397 "%s: comptags cannot be mapped into allocated space",
2407 __func__); 2398 __func__);
2408 return -EINVAL; 2399 return -EINVAL;
@@ -2429,7 +2420,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
2429 2420
2430 if (!mapped_buffer->ctag_map_win_addr) { 2421 if (!mapped_buffer->ctag_map_win_addr) {
2431 nvgpu_mutex_release(&vm->update_gmmu_lock); 2422 nvgpu_mutex_release(&vm->update_gmmu_lock);
2432 gk20a_err(d, 2423 nvgpu_err(g,
2433 "%s: failed to map comptags for mapping 0x%llx", 2424 "%s: failed to map comptags for mapping 0x%llx",
2434 __func__, mapping_gva); 2425 __func__, mapping_gva);
2435 return -ENOMEM; 2426 return -ENOMEM;
@@ -2437,7 +2428,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
2437 } else if (fixed_mapping && *compbits_win_gva && 2428 } else if (fixed_mapping && *compbits_win_gva &&
2438 mapped_buffer->ctag_map_win_addr != *compbits_win_gva) { 2429 mapped_buffer->ctag_map_win_addr != *compbits_win_gva) {
2439 nvgpu_mutex_release(&vm->update_gmmu_lock); 2430 nvgpu_mutex_release(&vm->update_gmmu_lock);
2440 gk20a_err(d, 2431 nvgpu_err(g,
2441 "%s: re-requesting comptags map into mismatching address. buffer offset 0x" 2432 "%s: re-requesting comptags map into mismatching address. buffer offset 0x"
2442 "%llx, existing comptag map at 0x%llx, requested remap 0x%llx", 2433 "%llx, existing comptag map at 0x%llx, requested remap 0x%llx",
2443 __func__, mapping_gva, 2434 __func__, mapping_gva,
@@ -2486,7 +2477,7 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm,
2486 aperture); 2477 aperture);
2487 nvgpu_mutex_release(&vm->update_gmmu_lock); 2478 nvgpu_mutex_release(&vm->update_gmmu_lock);
2488 if (!vaddr) { 2479 if (!vaddr) {
2489 gk20a_err(dev_from_vm(vm), "failed to allocate va space"); 2480 nvgpu_err(g, "failed to allocate va space");
2490 return 0; 2481 return 0;
2491 } 2482 }
2492 2483
@@ -2553,7 +2544,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
2553 &gk20a_fence_out); 2544 &gk20a_fence_out);
2554 2545
2555 if (err) { 2546 if (err) {
2556 gk20a_err(g->dev, 2547 nvgpu_err(g,
2557 "Failed gk20a_ce_execute_ops[%d]", err); 2548 "Failed gk20a_ce_execute_ops[%d]", err);
2558 return err; 2549 return err;
2559 } 2550 }
@@ -2576,7 +2567,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
2576 2567
2577 gk20a_fence_put(gk20a_last_fence); 2568 gk20a_fence_put(gk20a_last_fence);
2578 if (err) 2569 if (err)
2579 gk20a_err(g->dev, 2570 nvgpu_err(g,
2580 "fence wait failed for CE execute ops"); 2571 "fence wait failed for CE execute ops");
2581 } 2572 }
2582 2573
@@ -2692,7 +2683,7 @@ int gk20a_get_sgtable(struct device *d, struct sg_table **sgt,
2692 int err = 0; 2683 int err = 0;
2693 *sgt = nvgpu_kzalloc(g, sizeof(struct sg_table)); 2684 *sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
2694 if (!(*sgt)) { 2685 if (!(*sgt)) {
2695 dev_err(d, "failed to allocate memory\n"); 2686 nvgpu_err(g, "failed to allocate memory\n");
2696 err = -ENOMEM; 2687 err = -ENOMEM;
2697 goto fail; 2688 goto fail;
2698 } 2689 }
@@ -2700,7 +2691,7 @@ int gk20a_get_sgtable(struct device *d, struct sg_table **sgt,
2700 cpuva, iova, 2691 cpuva, iova,
2701 size); 2692 size);
2702 if (err) { 2693 if (err) {
2703 dev_err(d, "failed to create sg table\n"); 2694 nvgpu_err(g, "failed to create sg table\n");
2704 goto fail; 2695 goto fail;
2705 } 2696 }
2706 sg_dma_address((*sgt)->sgl) = iova; 2697 sg_dma_address((*sgt)->sgl) = iova;
@@ -2723,14 +2714,14 @@ int gk20a_get_sgtable_from_pages(struct device *d, struct sg_table **sgt,
2723 2714
2724 *sgt = nvgpu_kzalloc(g, sizeof(struct sg_table)); 2715 *sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
2725 if (!(*sgt)) { 2716 if (!(*sgt)) {
2726 dev_err(d, "failed to allocate memory\n"); 2717 nvgpu_err(g, "failed to allocate memory\n");
2727 err = -ENOMEM; 2718 err = -ENOMEM;
2728 goto fail; 2719 goto fail;
2729 } 2720 }
2730 err = sg_alloc_table_from_pages(*sgt, pages, 2721 err = sg_alloc_table_from_pages(*sgt, pages,
2731 DIV_ROUND_UP(size, PAGE_SIZE), 0, size, GFP_KERNEL); 2722 DIV_ROUND_UP(size, PAGE_SIZE), 0, size, GFP_KERNEL);
2732 if (err) { 2723 if (err) {
2733 dev_err(d, "failed to allocate sg_table\n"); 2724 nvgpu_err(g, "failed to allocate sg_table\n");
2734 goto fail; 2725 goto fail;
2735 } 2726 }
2736 sg_dma_address((*sgt)->sgl) = iova; 2727 sg_dma_address((*sgt)->sgl) = iova;
@@ -3049,7 +3040,7 @@ static int update_gmmu_level_locked(struct vm_gk20a *vm,
3049 /* get cpu access to the ptes */ 3040 /* get cpu access to the ptes */
3050 err = map_gmmu_pages(g, next_pte); 3041 err = map_gmmu_pages(g, next_pte);
3051 if (err) { 3042 if (err) {
3052 gk20a_err(dev_from_vm(vm), 3043 nvgpu_err(g,
3053 "couldn't map ptes for update as=%d", 3044 "couldn't map ptes for update as=%d",
3054 vm_aspace_id(vm)); 3045 vm_aspace_id(vm));
3055 return err; 3046 return err;
@@ -3113,7 +3104,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
3113 3104
3114 err = map_gmmu_pages(g, &vm->pdb); 3105 err = map_gmmu_pages(g, &vm->pdb);
3115 if (err) { 3106 if (err) {
3116 gk20a_err(dev_from_vm(vm), 3107 nvgpu_err(g,
3117 "couldn't map ptes for update as=%d", 3108 "couldn't map ptes for update as=%d",
3118 vm_aspace_id(vm)); 3109 vm_aspace_id(vm));
3119 return err; 3110 return err;
@@ -3284,14 +3275,14 @@ void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer,
3284 3275
3285void gk20a_vm_unmap(struct vm_gk20a *vm, u64 offset) 3276void gk20a_vm_unmap(struct vm_gk20a *vm, u64 offset)
3286{ 3277{
3287 struct device *d = dev_from_vm(vm); 3278 struct gk20a *g = vm->mm->g;
3288 struct mapped_buffer_node *mapped_buffer; 3279 struct mapped_buffer_node *mapped_buffer;
3289 3280
3290 nvgpu_mutex_acquire(&vm->update_gmmu_lock); 3281 nvgpu_mutex_acquire(&vm->update_gmmu_lock);
3291 mapped_buffer = find_mapped_buffer_locked(vm->mapped_buffers, offset); 3282 mapped_buffer = find_mapped_buffer_locked(vm->mapped_buffers, offset);
3292 if (!mapped_buffer) { 3283 if (!mapped_buffer) {
3293 nvgpu_mutex_release(&vm->update_gmmu_lock); 3284 nvgpu_mutex_release(&vm->update_gmmu_lock);
3294 gk20a_err(d, "invalid addr to unmap 0x%llx", offset); 3285 nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
3295 return; 3286 return;
3296 } 3287 }
3297 3288
@@ -4195,14 +4186,13 @@ void gk20a_deinit_vm(struct vm_gk20a *vm)
4195 4186
4196int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block) 4187int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
4197{ 4188{
4198 struct device *dev = dev_from_gk20a(g);
4199 int err; 4189 int err;
4200 4190
4201 gk20a_dbg_fn(""); 4191 gk20a_dbg_fn("");
4202 4192
4203 err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block); 4193 err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block);
4204 if (err) { 4194 if (err) {
4205 gk20a_err(dev, "%s: memory allocation failed\n", __func__); 4195 nvgpu_err(g, "%s: memory allocation failed\n", __func__);
4206 return err; 4196 return err;
4207 } 4197 }
4208 4198
@@ -4462,8 +4452,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
4462 } while (!nvgpu_timeout_expired(&timeout)); 4452 } while (!nvgpu_timeout_expired(&timeout));
4463 4453
4464 if (nvgpu_timeout_peek_expired(&timeout)) 4454 if (nvgpu_timeout_peek_expired(&timeout))
4465 gk20a_warn(dev_from_gk20a(g), 4455 nvgpu_warn(g, "l2_system_invalidate too many retries");
4466 "l2_system_invalidate too many retries");
4467 4456
4468 trace_gk20a_mm_l2_invalidate_done(g->name); 4457 trace_gk20a_mm_l2_invalidate_done(g->name);
4469} 4458}
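A second pattern in the mm_gk20a.c hunks is the removal of struct device locals (d, dev) that existed only to feed gk20a_err()/gk20a_warn(); the converted code derives the struct gk20a from the VM instead. A minimal sketch of that shape (the function name is invented; gk20a_from_vm() and nvgpu_err() appear in the hunks above):

	static void example_check_unmap(struct vm_gk20a *vm, u64 offset,
					struct mapped_buffer_node *mapped_buffer)
	{
		struct gk20a *g = gk20a_from_vm(vm);	/* replaces dev_from_vm(vm) */

		if (!mapped_buffer)
			nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
	}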
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 547ba924..38b8da9c 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -27,6 +27,7 @@
27#include <nvgpu/timers.h> 27#include <nvgpu/timers.h>
28#include <nvgpu/kmem.h> 28#include <nvgpu/kmem.h>
29#include <nvgpu/dma.h> 29#include <nvgpu/dma.h>
30#include <nvgpu/log.h>
30 31
31#include "gk20a.h" 32#include "gk20a.h"
32#include "gr_gk20a.h" 33#include "gr_gk20a.h"
@@ -314,7 +315,7 @@ static void printtrace(struct pmu_gk20a *pmu)
314 trace = (char *)tracebuffer; 315 trace = (char *)tracebuffer;
315 trace1 = (u32 *)tracebuffer; 316 trace1 = (u32 *)tracebuffer;
316 317
317 gk20a_err(dev_from_gk20a(g), "Dump pmutrace"); 318 nvgpu_err(g, "Dump pmutrace");
318 for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) { 319 for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) {
319 for (j = 0; j < 0x40; j++) 320 for (j = 0; j < 0x40; j++)
320 if (trace1[(i / 4) + j]) 321 if (trace1[(i / 4) + j])
@@ -335,7 +336,7 @@ static void printtrace(struct pmu_gk20a *pmu)
335 m += k + 2; 336 m += k + 2;
336 } 337 }
337 scnprintf((buf + count), 0x40, "%s", (trace+i+20+m)); 338 scnprintf((buf + count), 0x40, "%s", (trace+i+20+m));
338 gk20a_err(dev_from_gk20a(g), "%s", buf); 339 nvgpu_err(g, "%s", buf);
339 } 340 }
340 nvgpu_kfree(g, tracebuffer); 341 nvgpu_kfree(g, tracebuffer);
341} 342}
@@ -2184,8 +2185,7 @@ int gk20a_init_pmu(struct pmu_gk20a *pmu)
2184 get_pmu_sequence_out_alloc_ptr_v0; 2185 get_pmu_sequence_out_alloc_ptr_v0;
2185 break; 2186 break;
2186 default: 2187 default:
2187 gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)), 2188 nvgpu_err(g, "PMU code version not supported version: %d\n",
2188 "PMU code version not supported version: %d\n",
2189 pmu->desc->app_version); 2189 pmu->desc->app_version);
2190 err = -EINVAL; 2190 err = -EINVAL;
2191 goto fail_pmu_seq; 2191 goto fail_pmu_seq;
@@ -2217,14 +2217,12 @@ void pmu_copy_from_dmem(struct pmu_gk20a *pmu,
2217 u32 *dst_u32 = (u32*)dst; 2217 u32 *dst_u32 = (u32*)dst;
2218 2218
2219 if (size == 0) { 2219 if (size == 0) {
2220 gk20a_err(dev_from_gk20a(g), 2220 nvgpu_err(g, "size is zero");
2221 "size is zero");
2222 return; 2221 return;
2223 } 2222 }
2224 2223
2225 if (src & 0x3) { 2224 if (src & 0x3) {
2226 gk20a_err(dev_from_gk20a(g), 2225 nvgpu_err(g, "src (0x%08x) not 4-byte aligned", src);
2227 "src (0x%08x) not 4-byte aligned", src);
2228 return; 2226 return;
2229 } 2227 }
2230 2228
@@ -2263,14 +2261,12 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu,
2263 u32 *src_u32 = (u32*)src; 2261 u32 *src_u32 = (u32*)src;
2264 2262
2265 if (size == 0) { 2263 if (size == 0) {
2266 gk20a_err(dev_from_gk20a(g), 2264 nvgpu_err(g, "size is zero");
2267 "size is zero");
2268 return; 2265 return;
2269 } 2266 }
2270 2267
2271 if (dst & 0x3) { 2268 if (dst & 0x3) {
2272 gk20a_err(dev_from_gk20a(g), 2269 nvgpu_err(g, "dst (0x%08x) not 4-byte aligned", dst);
2273 "dst (0x%08x) not 4-byte aligned", dst);
2274 return; 2270 return;
2275 } 2271 }
2276 2272
@@ -2300,8 +2296,7 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu,
2300 data = gk20a_readl(g, pwr_falcon_dmemc_r(port)) & addr_mask; 2296 data = gk20a_readl(g, pwr_falcon_dmemc_r(port)) & addr_mask;
2301 size = ALIGN(size, 4); 2297 size = ALIGN(size, 4);
2302 if (data != ((dst + size) & addr_mask)) { 2298 if (data != ((dst + size) & addr_mask)) {
2303 gk20a_err(dev_from_gk20a(g), 2299 nvgpu_err(g, "copy failed. bytes written %d, expected %d",
2304 "copy failed. bytes written %d, expected %d",
2305 data - dst, size); 2300 data - dst, size);
2306 } 2301 }
2307 nvgpu_mutex_release(&pmu->pmu_copy_lock); 2302 nvgpu_mutex_release(&pmu->pmu_copy_lock);
@@ -2432,7 +2427,7 @@ int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable)
2432 } while (!nvgpu_timeout_expired(&timeout)); 2427 } while (!nvgpu_timeout_expired(&timeout));
2433 2428
2434 g->ops.mc.disable(g, mc_enable_pwr_enabled_f()); 2429 g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
2435 gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout"); 2430 nvgpu_err(g, "Falcon mem scrubbing timeout");
2436 2431
2437 return -ETIMEDOUT; 2432 return -ETIMEDOUT;
2438 } else { 2433 } else {
@@ -2615,8 +2610,7 @@ static int pmu_seq_acquire(struct pmu_gk20a *pmu,
2615 index = find_first_zero_bit(pmu->pmu_seq_tbl, 2610 index = find_first_zero_bit(pmu->pmu_seq_tbl,
2616 sizeof(pmu->pmu_seq_tbl)); 2611 sizeof(pmu->pmu_seq_tbl));
2617 if (index >= sizeof(pmu->pmu_seq_tbl)) { 2612 if (index >= sizeof(pmu->pmu_seq_tbl)) {
2618 gk20a_err(dev_from_gk20a(g), 2613 nvgpu_err(g, "no free sequence available");
2619 "no free sequence available");
2620 nvgpu_mutex_release(&pmu->pmu_seq_lock); 2614 nvgpu_mutex_release(&pmu->pmu_seq_lock);
2621 return -EAGAIN; 2615 return -EAGAIN;
2622 } 2616 }
@@ -2787,7 +2781,7 @@ int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token)
2787 gk20a_readl(g, pwr_pmu_mutex_id_r())); 2781 gk20a_readl(g, pwr_pmu_mutex_id_r()));
2788 if (data == pwr_pmu_mutex_id_value_init_v() || 2782 if (data == pwr_pmu_mutex_id_value_init_v() ||
2789 data == pwr_pmu_mutex_id_value_not_avail_v()) { 2783 data == pwr_pmu_mutex_id_value_not_avail_v()) {
2790 gk20a_warn(dev_from_gk20a(g), 2784 nvgpu_warn(g,
2791 "fail to generate mutex token: val 0x%08x", 2785 "fail to generate mutex token: val 0x%08x",
2792 owner); 2786 owner);
2793 usleep_range(20, 40); 2787 usleep_range(20, 40);
@@ -2844,8 +2838,7 @@ int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token)
2844 gk20a_readl(g, pwr_pmu_mutex_r(mutex->index))); 2838 gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
2845 2839
2846 if (*token != owner) { 2840 if (*token != owner) {
2847 gk20a_err(dev_from_gk20a(g), 2841 nvgpu_err(g, "requester 0x%08x NOT match owner 0x%08x",
2848 "requester 0x%08x NOT match owner 0x%08x",
2849 *token, owner); 2842 *token, owner);
2850 return -EINVAL; 2843 return -EINVAL;
2851 } 2844 }
@@ -2953,8 +2946,7 @@ static int pmu_queue_push(struct pmu_gk20a *pmu,
2953 gk20a_dbg_fn(""); 2946 gk20a_dbg_fn("");
2954 2947
2955 if (!queue->opened && queue->oflag == OFLAG_WRITE){ 2948 if (!queue->opened && queue->oflag == OFLAG_WRITE){
2956 gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)), 2949 nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for write");
2957 "queue not opened for write");
2958 return -EINVAL; 2950 return -EINVAL;
2959 } 2951 }
2960 2952
@@ -2972,8 +2964,7 @@ static int pmu_queue_pop(struct pmu_gk20a *pmu,
2972 *bytes_read = 0; 2964 *bytes_read = 0;
2973 2965
2974 if (!queue->opened && queue->oflag == OFLAG_READ){ 2966 if (!queue->opened && queue->oflag == OFLAG_READ){
2975 gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)), 2967 nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for read");
2976 "queue not opened for read");
2977 return -EINVAL; 2968 return -EINVAL;
2978 } 2969 }
2979 2970
@@ -2989,7 +2980,7 @@ static int pmu_queue_pop(struct pmu_gk20a *pmu,
2989 used = queue->offset + queue->size - tail; 2980 used = queue->offset + queue->size - tail;
2990 2981
2991 if (size > used) { 2982 if (size > used) {
2992 gk20a_warn(dev_from_gk20a(gk20a_from_pmu(pmu)), 2983 nvgpu_warn(gk20a_from_pmu(pmu),
2993 "queue size smaller than request read"); 2984 "queue size smaller than request read");
2994 size = used; 2985 size = used;
2995 } 2986 }
@@ -3008,8 +2999,7 @@ static void pmu_queue_rewind(struct pmu_gk20a *pmu,
3008 gk20a_dbg_fn(""); 2999 gk20a_dbg_fn("");
3009 3000
3010 if (!queue->opened) { 3001 if (!queue->opened) {
3011 gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)), 3002 nvgpu_err(gk20a_from_pmu(pmu), "queue not opened");
3012 "queue not opened");
3013 return; 3003 return;
3014 } 3004 }
3015 3005
@@ -3132,7 +3122,6 @@ static int gk20a_prepare_ucode(struct gk20a *g)
3132{ 3122{
3133 struct pmu_gk20a *pmu = &g->pmu; 3123 struct pmu_gk20a *pmu = &g->pmu;
3134 int err = 0; 3124 int err = 0;
3135 struct device *d = dev_from_gk20a(g);
3136 struct mm_gk20a *mm = &g->mm; 3125 struct mm_gk20a *mm = &g->mm;
3137 struct vm_gk20a *vm = &mm->pmu.vm; 3126 struct vm_gk20a *vm = &mm->pmu.vm;
3138 3127
@@ -3141,7 +3130,7 @@ static int gk20a_prepare_ucode(struct gk20a *g)
3141 3130
3142 pmu->fw = nvgpu_request_firmware(g, GK20A_PMU_UCODE_IMAGE, 0); 3131 pmu->fw = nvgpu_request_firmware(g, GK20A_PMU_UCODE_IMAGE, 0);
3143 if (!pmu->fw) { 3132 if (!pmu->fw) {
3144 gk20a_err(d, "failed to load pmu ucode!!"); 3133 nvgpu_err(g, "failed to load pmu ucode!!");
3145 return err; 3134 return err;
3146 } 3135 }
3147 3136
@@ -3173,7 +3162,6 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
3173 struct pmu_gk20a *pmu = &g->pmu; 3162 struct pmu_gk20a *pmu = &g->pmu;
3174 struct mm_gk20a *mm = &g->mm; 3163 struct mm_gk20a *mm = &g->mm;
3175 struct vm_gk20a *vm = &mm->pmu.vm; 3164 struct vm_gk20a *vm = &mm->pmu.vm;
3176 struct device *d = dev_from_gk20a(g);
3177 unsigned int i; 3165 unsigned int i;
3178 int err = 0; 3166 int err = 0;
3179 u8 *ptr; 3167 u8 *ptr;
@@ -3228,7 +3216,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
3228 err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE, 3216 err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
3229 &pmu->seq_buf); 3217 &pmu->seq_buf);
3230 if (err) { 3218 if (err) {
3231 gk20a_err(d, "failed to allocate memory\n"); 3219 nvgpu_err(g, "failed to allocate memory\n");
3232 goto err_free_seq; 3220 goto err_free_seq;
3233 } 3221 }
3234 3222
@@ -3245,7 +3233,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
3245 err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE, 3233 err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
3246 &pmu->trace_buf); 3234 &pmu->trace_buf);
3247 if (err) { 3235 if (err) {
3248 gk20a_err(d, "failed to allocate pmu trace buffer\n"); 3236 nvgpu_err(g, "failed to allocate pmu trace buffer\n");
3249 goto err_free_seq_buf; 3237 goto err_free_seq_buf;
3250 } 3238 }
3251 3239
@@ -3275,7 +3263,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
3275 3263
3276 gk20a_dbg_pmu("reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS"); 3264 gk20a_dbg_pmu("reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
3277 if (status != 0) { 3265 if (status != 0) {
3278 gk20a_err(dev_from_gk20a(g), "PGENG cmd aborted"); 3266 nvgpu_err(g, "PGENG cmd aborted");
3279 /* TBD: disable ELPG */ 3267 /* TBD: disable ELPG */
3280 return; 3268 return;
3281 } 3269 }
@@ -3283,7 +3271,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
3283 pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED); 3271 pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
3284 if ((!pmu->buf_loaded) && 3272 if ((!pmu->buf_loaded) &&
3285 (pmu->pmu_state == PMU_STATE_LOADING_PG_BUF)) 3273 (pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
3286 gk20a_err(dev_from_gk20a(g), "failed to load PGENG buffer"); 3274 nvgpu_err(g, "failed to load PGENG buffer");
3287 else { 3275 else {
3288 schedule_work(&pmu->pg_init); 3276 schedule_work(&pmu->pg_init);
3289 } 3277 }
@@ -3571,7 +3559,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
3571 gk20a_dbg_fn(""); 3559 gk20a_dbg_fn("");
3572 3560
3573 if (status != 0) { 3561 if (status != 0) {
3574 gk20a_err(dev_from_gk20a(g), "ELPG cmd aborted"); 3562 nvgpu_err(g, "ELPG cmd aborted");
3575 /* TBD: disable ELPG */ 3563 /* TBD: disable ELPG */
3576 return; 3564 return;
3577 } 3565 }
@@ -3615,7 +3603,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
3615 } 3603 }
3616 break; 3604 break;
3617 default: 3605 default:
3618 gk20a_err(dev_from_gk20a(g), 3606 nvgpu_err(g,
3619 "unsupported ELPG message : 0x%04x", elpg_msg->msg); 3607 "unsupported ELPG message : 0x%04x", elpg_msg->msg);
3620 } 3608 }
3621 3609
@@ -3630,7 +3618,7 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
3630 gk20a_dbg_fn(""); 3618 gk20a_dbg_fn("");
3631 3619
3632 if (status != 0) { 3620 if (status != 0) {
3633 gk20a_err(dev_from_gk20a(g), "ELPG cmd aborted"); 3621 nvgpu_err(g, "ELPG cmd aborted");
3634 /* TBD: disable ELPG */ 3622 /* TBD: disable ELPG */
3635 return; 3623 return;
3636 } 3624 }
@@ -3769,7 +3757,7 @@ static u8 get_perfmon_id(struct pmu_gk20a *pmu)
3769 break; 3757 break;
3770#endif 3758#endif
3771 default: 3759 default:
3772 gk20a_err(g->dev, "no support for %x", ver); 3760 nvgpu_err(g, "no support for %x", ver);
3773 BUG(); 3761 BUG();
3774 } 3762 }
3775 3763
@@ -3837,8 +3825,7 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu)
3837 pmu->sample_buffer = nvgpu_alloc(&pmu->dmem, 3825 pmu->sample_buffer = nvgpu_alloc(&pmu->dmem,
3838 2 * sizeof(u16)); 3826 2 * sizeof(u16));
3839 if (!pmu->sample_buffer) { 3827 if (!pmu->sample_buffer) {
3840 gk20a_err(dev_from_gk20a(g), 3828 nvgpu_err(g, "failed to allocate perfmon sample buffer");
3841 "failed to allocate perfmon sample buffer");
3842 return -ENOMEM; 3829 return -ENOMEM;
3843 } 3830 }
3844 3831
@@ -3893,8 +3880,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
3893 pmu_copy_from_dmem(pmu, tail, 3880 pmu_copy_from_dmem(pmu, tail,
3894 (u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0); 3881 (u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0);
3895 if (msg->hdr.unit_id != PMU_UNIT_INIT) { 3882 if (msg->hdr.unit_id != PMU_UNIT_INIT) {
3896 gk20a_err(dev_from_gk20a(g), 3883 nvgpu_err(g, "expecting init msg");
3897 "expecting init msg");
3898 return -EINVAL; 3884 return -EINVAL;
3899 } 3885 }
3900 3886
@@ -3902,8 +3888,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
3902 (u8 *)&msg->msg, msg->hdr.size - PMU_MSG_HDR_SIZE, 0); 3888 (u8 *)&msg->msg, msg->hdr.size - PMU_MSG_HDR_SIZE, 0);
3903 3889
3904 if (msg->msg.init.msg_type != PMU_INIT_MSG_TYPE_PMU_INIT) { 3890 if (msg->msg.init.msg_type != PMU_INIT_MSG_TYPE_PMU_INIT) {
3905 gk20a_err(dev_from_gk20a(g), 3891 nvgpu_err(g, "expecting init msg");
3906 "expecting init msg");
3907 return -EINVAL; 3892 return -EINVAL;
3908 } 3893 }
3909 3894
@@ -3970,8 +3955,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
3970 3955
3971 err = pmu_queue_open_read(pmu, queue); 3956 err = pmu_queue_open_read(pmu, queue);
3972 if (err) { 3957 if (err) {
3973 gk20a_err(dev_from_gk20a(g), 3958 nvgpu_err(g, "fail to open queue %d for read", queue->id);
3974 "fail to open queue %d for read", queue->id);
3975 *status = err; 3959 *status = err;
3976 return false; 3960 return false;
3977 } 3961 }
@@ -3979,8 +3963,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
3979 err = pmu_queue_pop(pmu, queue, &msg->hdr, 3963 err = pmu_queue_pop(pmu, queue, &msg->hdr,
3980 PMU_MSG_HDR_SIZE, &bytes_read); 3964 PMU_MSG_HDR_SIZE, &bytes_read);
3981 if (err || bytes_read != PMU_MSG_HDR_SIZE) { 3965 if (err || bytes_read != PMU_MSG_HDR_SIZE) {
3982 gk20a_err(dev_from_gk20a(g), 3966 nvgpu_err(g, "fail to read msg from queue %d", queue->id);
3983 "fail to read msg from queue %d", queue->id);
3984 *status = err | -EINVAL; 3967 *status = err | -EINVAL;
3985 goto clean_up; 3968 goto clean_up;
3986 } 3969 }
@@ -3991,7 +3974,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
3991 err = pmu_queue_pop(pmu, queue, &msg->hdr, 3974 err = pmu_queue_pop(pmu, queue, &msg->hdr,
3992 PMU_MSG_HDR_SIZE, &bytes_read); 3975 PMU_MSG_HDR_SIZE, &bytes_read);
3993 if (err || bytes_read != PMU_MSG_HDR_SIZE) { 3976 if (err || bytes_read != PMU_MSG_HDR_SIZE) {
3994 gk20a_err(dev_from_gk20a(g), 3977 nvgpu_err(g,
3995 "fail to read msg from queue %d", queue->id); 3978 "fail to read msg from queue %d", queue->id);
3996 *status = err | -EINVAL; 3979 *status = err | -EINVAL;
3997 goto clean_up; 3980 goto clean_up;
@@ -3999,8 +3982,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
3999 } 3982 }
4000 3983
4001 if (!PMU_UNIT_ID_IS_VALID(msg->hdr.unit_id)) { 3984 if (!PMU_UNIT_ID_IS_VALID(msg->hdr.unit_id)) {
4002 gk20a_err(dev_from_gk20a(g), 3985 nvgpu_err(g, "read invalid unit_id %d from queue %d",
4003 "read invalid unit_id %d from queue %d",
4004 msg->hdr.unit_id, queue->id); 3986 msg->hdr.unit_id, queue->id);
4005 *status = -EINVAL; 3987 *status = -EINVAL;
4006 goto clean_up; 3988 goto clean_up;
@@ -4011,7 +3993,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
4011 err = pmu_queue_pop(pmu, queue, &msg->msg, 3993 err = pmu_queue_pop(pmu, queue, &msg->msg,
4012 read_size, &bytes_read); 3994 read_size, &bytes_read);
4013 if (err || bytes_read != read_size) { 3995 if (err || bytes_read != read_size) {
4014 gk20a_err(dev_from_gk20a(g), 3996 nvgpu_err(g,
4015 "fail to read msg from queue %d", queue->id); 3997 "fail to read msg from queue %d", queue->id);
4016 *status = err; 3998 *status = err;
4017 goto clean_up; 3999 goto clean_up;
@@ -4020,8 +4002,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
4020 4002
4021 err = pmu_queue_close(pmu, queue, true); 4003 err = pmu_queue_close(pmu, queue, true);
4022 if (err) { 4004 if (err) {
4023 gk20a_err(dev_from_gk20a(g), 4005 nvgpu_err(g, "fail to close queue %d", queue->id);
4024 "fail to close queue %d", queue->id);
4025 *status = err; 4006 *status = err;
4026 return false; 4007 return false;
4027 } 4008 }
@@ -4031,8 +4012,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
4031clean_up: 4012clean_up:
4032 err = pmu_queue_close(pmu, queue, false); 4013 err = pmu_queue_close(pmu, queue, false);
4033 if (err) 4014 if (err)
4034 gk20a_err(dev_from_gk20a(g), 4015 nvgpu_err(g, "fail to close queue %d", queue->id);
4035 "fail to close queue %d", queue->id);
4036 return false; 4016 return false;
4037} 4017}
4038 4018
@@ -4049,23 +4029,20 @@ static int pmu_response_handle(struct pmu_gk20a *pmu,
4049 seq = &pmu->seq[msg->hdr.seq_id]; 4029 seq = &pmu->seq[msg->hdr.seq_id];
4050 if (seq->state != PMU_SEQ_STATE_USED && 4030 if (seq->state != PMU_SEQ_STATE_USED &&
4051 seq->state != PMU_SEQ_STATE_CANCELLED) { 4031 seq->state != PMU_SEQ_STATE_CANCELLED) {
4052 gk20a_err(dev_from_gk20a(g), 4032 nvgpu_err(g, "msg for an unknown sequence %d", seq->id);
4053 "msg for an unknown sequence %d", seq->id);
4054 return -EINVAL; 4033 return -EINVAL;
4055 } 4034 }
4056 4035
4057 if (msg->hdr.unit_id == PMU_UNIT_RC && 4036 if (msg->hdr.unit_id == PMU_UNIT_RC &&
4058 msg->msg.rc.msg_type == PMU_RC_MSG_TYPE_UNHANDLED_CMD) { 4037 msg->msg.rc.msg_type == PMU_RC_MSG_TYPE_UNHANDLED_CMD) {
4059 gk20a_err(dev_from_gk20a(g), 4038 nvgpu_err(g, "unhandled cmd: seq %d", seq->id);
4060 "unhandled cmd: seq %d", seq->id);
4061 } 4039 }
4062 else if (seq->state != PMU_SEQ_STATE_CANCELLED) { 4040 else if (seq->state != PMU_SEQ_STATE_CANCELLED) {
4063 if (seq->msg) { 4041 if (seq->msg) {
4064 if (seq->msg->hdr.size >= msg->hdr.size) { 4042 if (seq->msg->hdr.size >= msg->hdr.size) {
4065 memcpy(seq->msg, msg, msg->hdr.size); 4043 memcpy(seq->msg, msg, msg->hdr.size);
4066 } else { 4044 } else {
4067 gk20a_err(dev_from_gk20a(g), 4045 nvgpu_err(g, "sequence %d msg buffer too small",
4068 "sequence %d msg buffer too small",
4069 seq->id); 4046 seq->id);
4070 } 4047 }
4071 } 4048 }
@@ -4158,7 +4135,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
4158 pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g), 4135 pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
4159 &pmu->zbc_save_done, 1); 4136 &pmu->zbc_save_done, 1);
4160 if (!pmu->zbc_save_done) 4137 if (!pmu->zbc_save_done)
4161 gk20a_err(dev_from_gk20a(g), "ZBC save timeout"); 4138 nvgpu_err(g, "ZBC save timeout");
4162} 4139}
4163 4140
4164static int pmu_perfmon_start_sampling(struct pmu_gk20a *pmu) 4141static int pmu_perfmon_start_sampling(struct pmu_gk20a *pmu)
@@ -4451,118 +4428,118 @@ void pmu_dump_falcon_stats(struct pmu_gk20a *pmu)
4451 struct gk20a *g = gk20a_from_pmu(pmu); 4428 struct gk20a *g = gk20a_from_pmu(pmu);
4452 unsigned int i; 4429 unsigned int i;
4453 4430
4454 gk20a_err(dev_from_gk20a(g), "pwr_falcon_os_r : %d", 4431 nvgpu_err(g, "pwr_falcon_os_r : %d",
4455 gk20a_readl(g, pwr_falcon_os_r())); 4432 gk20a_readl(g, pwr_falcon_os_r()));
4456 gk20a_err(dev_from_gk20a(g), "pwr_falcon_cpuctl_r : 0x%x", 4433 nvgpu_err(g, "pwr_falcon_cpuctl_r : 0x%x",
4457 gk20a_readl(g, pwr_falcon_cpuctl_r())); 4434 gk20a_readl(g, pwr_falcon_cpuctl_r()));
4458 gk20a_err(dev_from_gk20a(g), "pwr_falcon_idlestate_r : 0x%x", 4435 nvgpu_err(g, "pwr_falcon_idlestate_r : 0x%x",
4459 gk20a_readl(g, pwr_falcon_idlestate_r())); 4436 gk20a_readl(g, pwr_falcon_idlestate_r()));
4460 gk20a_err(dev_from_gk20a(g), "pwr_falcon_mailbox0_r : 0x%x", 4437 nvgpu_err(g, "pwr_falcon_mailbox0_r : 0x%x",
4461 gk20a_readl(g, pwr_falcon_mailbox0_r())); 4438 gk20a_readl(g, pwr_falcon_mailbox0_r()));
4462 gk20a_err(dev_from_gk20a(g), "pwr_falcon_mailbox1_r : 0x%x", 4439 nvgpu_err(g, "pwr_falcon_mailbox1_r : 0x%x",
4463 gk20a_readl(g, pwr_falcon_mailbox1_r())); 4440 gk20a_readl(g, pwr_falcon_mailbox1_r()));
4464 gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqstat_r : 0x%x", 4441 nvgpu_err(g, "pwr_falcon_irqstat_r : 0x%x",
4465 gk20a_readl(g, pwr_falcon_irqstat_r())); 4442 gk20a_readl(g, pwr_falcon_irqstat_r()));
4466 gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqmode_r : 0x%x", 4443 nvgpu_err(g, "pwr_falcon_irqmode_r : 0x%x",
4467 gk20a_readl(g, pwr_falcon_irqmode_r())); 4444 gk20a_readl(g, pwr_falcon_irqmode_r()));
4468 gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqmask_r : 0x%x", 4445 nvgpu_err(g, "pwr_falcon_irqmask_r : 0x%x",
4469 gk20a_readl(g, pwr_falcon_irqmask_r())); 4446 gk20a_readl(g, pwr_falcon_irqmask_r()));
4470 gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqdest_r : 0x%x", 4447 nvgpu_err(g, "pwr_falcon_irqdest_r : 0x%x",
4471 gk20a_readl(g, pwr_falcon_irqdest_r())); 4448 gk20a_readl(g, pwr_falcon_irqdest_r()));
4472 4449
4473 for (i = 0; i < pwr_pmu_mailbox__size_1_v(); i++) 4450 for (i = 0; i < pwr_pmu_mailbox__size_1_v(); i++)
4474 gk20a_err(dev_from_gk20a(g), "pwr_pmu_mailbox_r(%d) : 0x%x", 4451 nvgpu_err(g, "pwr_pmu_mailbox_r(%d) : 0x%x",
4475 i, gk20a_readl(g, pwr_pmu_mailbox_r(i))); 4452 i, gk20a_readl(g, pwr_pmu_mailbox_r(i)));
4476 4453
4477 for (i = 0; i < pwr_pmu_debug__size_1_v(); i++) 4454 for (i = 0; i < pwr_pmu_debug__size_1_v(); i++)
4478 gk20a_err(dev_from_gk20a(g), "pwr_pmu_debug_r(%d) : 0x%x", 4455 nvgpu_err(g, "pwr_pmu_debug_r(%d) : 0x%x",
4479 i, gk20a_readl(g, pwr_pmu_debug_r(i))); 4456 i, gk20a_readl(g, pwr_pmu_debug_r(i)));
4480 4457
4481 for (i = 0; i < 6/*NV_PPWR_FALCON_ICD_IDX_RSTAT__SIZE_1*/; i++) { 4458 for (i = 0; i < 6/*NV_PPWR_FALCON_ICD_IDX_RSTAT__SIZE_1*/; i++) {
4482 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), 4459 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
4483 pwr_pmu_falcon_icd_cmd_opc_rstat_f() | 4460 pwr_pmu_falcon_icd_cmd_opc_rstat_f() |
4484 pwr_pmu_falcon_icd_cmd_idx_f(i)); 4461 pwr_pmu_falcon_icd_cmd_idx_f(i));
4485 gk20a_err(dev_from_gk20a(g), "pmu_rstat (%d) : 0x%x", 4462 nvgpu_err(g, "pmu_rstat (%d) : 0x%x",
4486 i, gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); 4463 i, gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
4487 } 4464 }
4488 4465
4489 i = gk20a_readl(g, pwr_pmu_bar0_error_status_r()); 4466 i = gk20a_readl(g, pwr_pmu_bar0_error_status_r());
4490 gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_error_status_r : 0x%x", i); 4467 nvgpu_err(g, "pwr_pmu_bar0_error_status_r : 0x%x", i);
4491 if (i != 0) { 4468 if (i != 0) {
4492 gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_addr_r : 0x%x", 4469 nvgpu_err(g, "pwr_pmu_bar0_addr_r : 0x%x",
4493 gk20a_readl(g, pwr_pmu_bar0_addr_r())); 4470 gk20a_readl(g, pwr_pmu_bar0_addr_r()));
4494 gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_data_r : 0x%x", 4471 nvgpu_err(g, "pwr_pmu_bar0_data_r : 0x%x",
4495 gk20a_readl(g, pwr_pmu_bar0_data_r())); 4472 gk20a_readl(g, pwr_pmu_bar0_data_r()));
4496 gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_timeout_r : 0x%x", 4473 nvgpu_err(g, "pwr_pmu_bar0_timeout_r : 0x%x",
4497 gk20a_readl(g, pwr_pmu_bar0_timeout_r())); 4474 gk20a_readl(g, pwr_pmu_bar0_timeout_r()));
4498 gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_ctl_r : 0x%x", 4475 nvgpu_err(g, "pwr_pmu_bar0_ctl_r : 0x%x",
4499 gk20a_readl(g, pwr_pmu_bar0_ctl_r())); 4476 gk20a_readl(g, pwr_pmu_bar0_ctl_r()));
4500 } 4477 }
4501 4478
4502 i = gk20a_readl(g, pwr_pmu_bar0_fecs_error_r()); 4479 i = gk20a_readl(g, pwr_pmu_bar0_fecs_error_r());
4503 gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_fecs_error_r : 0x%x", i); 4480 nvgpu_err(g, "pwr_pmu_bar0_fecs_error_r : 0x%x", i);
4504 4481
4505 i = gk20a_readl(g, pwr_falcon_exterrstat_r()); 4482 i = gk20a_readl(g, pwr_falcon_exterrstat_r());
4506 gk20a_err(dev_from_gk20a(g), "pwr_falcon_exterrstat_r : 0x%x", i); 4483 nvgpu_err(g, "pwr_falcon_exterrstat_r : 0x%x", i);
4507 if (pwr_falcon_exterrstat_valid_v(i) == 4484 if (pwr_falcon_exterrstat_valid_v(i) ==
4508 pwr_falcon_exterrstat_valid_true_v()) { 4485 pwr_falcon_exterrstat_valid_true_v()) {
4509 gk20a_err(dev_from_gk20a(g), "pwr_falcon_exterraddr_r : 0x%x", 4486 nvgpu_err(g, "pwr_falcon_exterraddr_r : 0x%x",
4510 gk20a_readl(g, pwr_falcon_exterraddr_r())); 4487 gk20a_readl(g, pwr_falcon_exterraddr_r()));
4511 gk20a_err(dev_from_gk20a(g), "pmc_enable : 0x%x", 4488 nvgpu_err(g, "pmc_enable : 0x%x",
4512 gk20a_readl(g, mc_enable_r())); 4489 gk20a_readl(g, mc_enable_r()));
4513 } 4490 }
4514 4491
4515 gk20a_err(dev_from_gk20a(g), "pwr_falcon_engctl_r : 0x%x", 4492 nvgpu_err(g, "pwr_falcon_engctl_r : 0x%x",
4516 gk20a_readl(g, pwr_falcon_engctl_r())); 4493 gk20a_readl(g, pwr_falcon_engctl_r()));
4517 gk20a_err(dev_from_gk20a(g), "pwr_falcon_curctx_r : 0x%x", 4494 nvgpu_err(g, "pwr_falcon_curctx_r : 0x%x",
4518 gk20a_readl(g, pwr_falcon_curctx_r())); 4495 gk20a_readl(g, pwr_falcon_curctx_r()));
4519 gk20a_err(dev_from_gk20a(g), "pwr_falcon_nxtctx_r : 0x%x", 4496 nvgpu_err(g, "pwr_falcon_nxtctx_r : 0x%x",
4520 gk20a_readl(g, pwr_falcon_nxtctx_r())); 4497 gk20a_readl(g, pwr_falcon_nxtctx_r()));
4521 4498
4522 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), 4499 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
4523 pwr_pmu_falcon_icd_cmd_opc_rreg_f() | 4500 pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
4524 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_IMB)); 4501 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_IMB));
4525 gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_IMB : 0x%x", 4502 nvgpu_err(g, "PMU_FALCON_REG_IMB : 0x%x",
4526 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); 4503 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
4527 4504
4528 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), 4505 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
4529 pwr_pmu_falcon_icd_cmd_opc_rreg_f() | 4506 pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
4530 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_DMB)); 4507 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_DMB));
4531 gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_DMB : 0x%x", 4508 nvgpu_err(g, "PMU_FALCON_REG_DMB : 0x%x",
4532 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); 4509 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
4533 4510
4534 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), 4511 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
4535 pwr_pmu_falcon_icd_cmd_opc_rreg_f() | 4512 pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
4536 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CSW)); 4513 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CSW));
4537 gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_CSW : 0x%x", 4514 nvgpu_err(g, "PMU_FALCON_REG_CSW : 0x%x",
4538 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); 4515 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
4539 4516
4540 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), 4517 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
4541 pwr_pmu_falcon_icd_cmd_opc_rreg_f() | 4518 pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
4542 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CTX)); 4519 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CTX));
4543 gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_CTX : 0x%x", 4520 nvgpu_err(g, "PMU_FALCON_REG_CTX : 0x%x",
4544 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); 4521 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
4545 4522
4546 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), 4523 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
4547 pwr_pmu_falcon_icd_cmd_opc_rreg_f() | 4524 pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
4548 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_EXCI)); 4525 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_EXCI));
4549 gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_EXCI : 0x%x", 4526 nvgpu_err(g, "PMU_FALCON_REG_EXCI : 0x%x",
4550 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); 4527 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
4551 4528
4552 for (i = 0; i < 4; i++) { 4529 for (i = 0; i < 4; i++) {
4553 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), 4530 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
4554 pwr_pmu_falcon_icd_cmd_opc_rreg_f() | 4531 pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
4555 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_PC)); 4532 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_PC));
4556 gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_PC : 0x%x", 4533 nvgpu_err(g, "PMU_FALCON_REG_PC : 0x%x",
4557 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); 4534 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
4558 4535
4559 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), 4536 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
4560 pwr_pmu_falcon_icd_cmd_opc_rreg_f() | 4537 pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
4561 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_SP)); 4538 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_SP));
4562 gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_SP : 0x%x", 4539 nvgpu_err(g, "PMU_FALCON_REG_SP : 0x%x",
4563 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); 4540 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
4564 } 4541 }
4565 gk20a_err(dev_from_gk20a(g), "elpg stat: %d\n", 4542 nvgpu_err(g, "elpg stat: %d\n",
4566 pmu->elpg_stat); 4543 pmu->elpg_stat);
4567 4544
4568 /* PMU may crash due to FECS crash. Dump FECS status */ 4545 /* PMU may crash due to FECS crash. Dump FECS status */
@@ -4600,8 +4577,7 @@ void gk20a_pmu_isr(struct gk20a *g)
4600 } 4577 }
4601 4578
4602 if (intr & pwr_falcon_irqstat_halt_true_f()) { 4579 if (intr & pwr_falcon_irqstat_halt_true_f()) {
4603 gk20a_err(dev_from_gk20a(g), 4580 nvgpu_err(g, "pmu halt intr not implemented");
4604 "pmu halt intr not implemented");
4605 pmu_dump_falcon_stats(pmu); 4581 pmu_dump_falcon_stats(pmu);
4606 if (gk20a_readl(g, pwr_pmu_mailbox_r 4582 if (gk20a_readl(g, pwr_pmu_mailbox_r
4607 (PMU_MODE_MISMATCH_STATUS_MAILBOX_R)) == 4583 (PMU_MODE_MISMATCH_STATUS_MAILBOX_R)) ==
@@ -4610,7 +4586,7 @@ void gk20a_pmu_isr(struct gk20a *g)
4610 g->ops.pmu.dump_secure_fuses(g); 4586 g->ops.pmu.dump_secure_fuses(g);
4611 } 4587 }
4612 if (intr & pwr_falcon_irqstat_exterr_true_f()) { 4588 if (intr & pwr_falcon_irqstat_exterr_true_f()) {
4613 gk20a_err(dev_from_gk20a(g), 4589 nvgpu_err(g,
4614 "pmu exterr intr not implemented. Clearing interrupt."); 4590 "pmu exterr intr not implemented. Clearing interrupt.");
4615 pmu_dump_falcon_stats(pmu); 4591 pmu_dump_falcon_stats(pmu);
4616 4592
@@ -4692,7 +4668,7 @@ static bool pmu_validate_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd,
4692 return true; 4668 return true;
4693 4669
4694invalid_cmd: 4670invalid_cmd:
4695 gk20a_err(dev_from_gk20a(g), "invalid pmu cmd :\n" 4671 nvgpu_err(g, "invalid pmu cmd :\n"
4696 "queue_id=%d,\n" 4672 "queue_id=%d,\n"
4697 "cmd_size=%d, cmd_unit_id=%d, msg=%p, msg_size=%d,\n" 4673 "cmd_size=%d, cmd_unit_id=%d, msg=%p, msg_size=%d,\n"
4698 "payload in=%p, in_size=%d, in_offset=%d,\n" 4674 "payload in=%p, in_size=%d, in_offset=%d,\n"
@@ -4736,8 +4712,7 @@ static int pmu_write_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd,
4736 4712
4737clean_up: 4713clean_up:
4738 if (err) 4714 if (err)
4739 gk20a_err(dev_from_gk20a(g), 4715 nvgpu_err(g, "fail to write cmd to queue %d", queue_id);
4740 "fail to write cmd to queue %d", queue_id);
4741 else 4716 else
4742 gk20a_dbg_fn("done"); 4717 gk20a_dbg_fn("done");
4743 4718
@@ -4762,7 +4737,7 @@ int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
4762 4737
4763 err = nvgpu_dma_alloc_map_vid(vm, size, mem); 4738 err = nvgpu_dma_alloc_map_vid(vm, size, mem);
4764 if (err) { 4739 if (err) {
4765 gk20a_err(g->dev, "memory allocation failed"); 4740 nvgpu_err(g, "memory allocation failed");
4766 return -ENOMEM; 4741 return -ENOMEM;
4767 } 4742 }
4768 4743
@@ -4778,7 +4753,7 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
4778 4753
4779 err = nvgpu_dma_alloc_map_sys(vm, size, mem); 4754 err = nvgpu_dma_alloc_map_sys(vm, size, mem);
4780 if (err) { 4755 if (err) {
4781 gk20a_err(g->dev, "failed to allocate memory\n"); 4756 nvgpu_err(g, "failed to allocate memory\n");
4782 return -ENOMEM; 4757 return -ENOMEM;
4783 } 4758 }
4784 4759
@@ -4806,14 +4781,11 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
4806 4781
4807 if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) { 4782 if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) {
4808 if (!cmd) 4783 if (!cmd)
4809 gk20a_warn(dev_from_gk20a(g), 4784 nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__);
4810 "%s(): PMU cmd buffer is NULL", __func__);
4811 else if (!seq_desc) 4785 else if (!seq_desc)
4812 gk20a_warn(dev_from_gk20a(g), 4786 nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__);
4813 "%s(): Seq descriptor is NULL", __func__);
4814 else 4787 else
4815 gk20a_warn(dev_from_gk20a(g), 4788 nvgpu_warn(g, "%s(): PMU is not ready", __func__);
4816 "%s(): PMU is not ready", __func__);
4817 4789
4818 WARN_ON(1); 4790 WARN_ON(1);
4819 return -EINVAL; 4791 return -EINVAL;
@@ -5044,9 +5016,9 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
5044 5016
5045 /* something is not right if we end up in following code path */ 5017 /* something is not right if we end up in following code path */
5046 if (unlikely(pmu->elpg_refcnt > 1)) { 5018 if (unlikely(pmu->elpg_refcnt > 1)) {
5047 gk20a_warn(dev_from_gk20a(g), 5019 nvgpu_warn(g,
5048 "%s(): possible elpg refcnt mismatch. elpg refcnt=%d", 5020 "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
5049 __func__, pmu->elpg_refcnt); 5021 __func__, pmu->elpg_refcnt);
5050 WARN_ON(1); 5022 WARN_ON(1);
5051 } 5023 }
5052 5024
@@ -5102,9 +5074,9 @@ int gk20a_pmu_disable_elpg(struct gk20a *g)
5102 5074
5103 pmu->elpg_refcnt--; 5075 pmu->elpg_refcnt--;
5104 if (pmu->elpg_refcnt > 0) { 5076 if (pmu->elpg_refcnt > 0) {
5105 gk20a_warn(dev_from_gk20a(g), 5077 nvgpu_warn(g,
5106 "%s(): possible elpg refcnt mismatch. elpg refcnt=%d", 5078 "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
5107 __func__, pmu->elpg_refcnt); 5079 __func__, pmu->elpg_refcnt);
5108 WARN_ON(1); 5080 WARN_ON(1);
5109 ret = 0; 5081 ret = 0;
5110 goto exit_unlock; 5082 goto exit_unlock;
@@ -5123,8 +5095,7 @@ int gk20a_pmu_disable_elpg(struct gk20a *g)
5123 &pmu->elpg_stat, PMU_ELPG_STAT_ON); 5095 &pmu->elpg_stat, PMU_ELPG_STAT_ON);
5124 5096
5125 if (pmu->elpg_stat != PMU_ELPG_STAT_ON) { 5097 if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
5126 gk20a_err(dev_from_gk20a(g), 5098 nvgpu_err(g, "ELPG_ALLOW_ACK failed, elpg_stat=%d",
5127 "ELPG_ALLOW_ACK failed, elpg_stat=%d",
5128 pmu->elpg_stat); 5099 pmu->elpg_stat);
5129 pmu_dump_elpg_stats(pmu); 5100 pmu_dump_elpg_stats(pmu);
5130 pmu_dump_falcon_stats(pmu); 5101 pmu_dump_falcon_stats(pmu);
@@ -5175,8 +5146,7 @@ int gk20a_pmu_disable_elpg(struct gk20a *g)
5175 gk20a_get_gr_idle_timeout(g), 5146 gk20a_get_gr_idle_timeout(g),
5176 ptr, PMU_ELPG_STAT_OFF); 5147 ptr, PMU_ELPG_STAT_OFF);
5177 if (*ptr != PMU_ELPG_STAT_OFF) { 5148 if (*ptr != PMU_ELPG_STAT_OFF) {
5178 gk20a_err(dev_from_gk20a(g), 5149 nvgpu_err(g, "ELPG_DISALLOW_ACK failed");
5179 "ELPG_DISALLOW_ACK failed");
5180 pmu_dump_elpg_stats(pmu); 5150 pmu_dump_elpg_stats(pmu);
5181 pmu_dump_falcon_stats(pmu); 5151 pmu_dump_falcon_stats(pmu);
5182 ret = -EBUSY; 5152 ret = -EBUSY;
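
Besides the logging conversion, the pmu_copy_to_dmem()/pmu_copy_from_dmem() hunks above preserve three sanity checks: a zero-length copy is rejected, the DMEM address must be 4-byte aligned, and after the transfer the falcon's auto-incremented DMEM address must equal the start address plus the word-aligned size. The sketch below restates those checks in a compilable form; ALIGN4(), the address mask and the sample addresses are assumptions for illustration.

/* Sketch of the sanity checks visible in the pmu_copy_to_dmem() hunks:
 * reject empty or misaligned copies up front, and afterwards verify
 * that the DMEM address advanced by the word-aligned size. */
#include <assert.h>
#include <stdbool.h>

#define ALIGN4(x)	(((x) + 3u) & ~3u)

static bool dmem_copy_ok(unsigned int dst, unsigned int size,
			 unsigned int dmem_addr_after, unsigned int addr_mask)
{
	if (size == 0)
		return false;			/* "size is zero" */
	if (dst & 0x3)
		return false;			/* "not 4-byte aligned" */
	/* the "copy failed. bytes written %d, expected %d" condition */
	return (dmem_addr_after & addr_mask) ==
	       ((dst + ALIGN4(size)) & addr_mask);
}

int main(void)
{
	const unsigned int mask = 0x00ffffffu;	/* assumed address mask */

	assert(dmem_copy_ok(0x100, 6, 0x108, mask));	/* 6 rounds up to 8 */
	assert(!dmem_copy_ok(0x102, 8, 0x10a, mask));	/* misaligned dst */
	assert(!dmem_copy_ok(0x100, 0, 0x100, mask));	/* empty copy */
	return 0;
}
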
diff --git a/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c b/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c
index 752ee121..08198776 100644
--- a/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c
@@ -20,6 +20,8 @@
20 20
21#include "gk20a.h" 21#include "gk20a.h"
22 22
23#include <nvgpu/log.h>
24
23#include <nvgpu/hw/gk20a/hw_mc_gk20a.h> 25#include <nvgpu/hw/gk20a/hw_mc_gk20a.h>
24#include <nvgpu/hw/gk20a/hw_pri_ringmaster_gk20a.h> 26#include <nvgpu/hw/gk20a/hw_pri_ringmaster_gk20a.h>
25#include <nvgpu/hw/gk20a/hw_pri_ringstation_sys_gk20a.h> 27#include <nvgpu/hw/gk20a/hw_pri_ringstation_sys_gk20a.h>
@@ -121,6 +123,5 @@ void gk20a_priv_ring_isr(struct gk20a *g)
121 } while (cmd != pri_ringmaster_command_cmd_no_cmd_v() && --retry); 123 } while (cmd != pri_ringmaster_command_cmd_no_cmd_v() && --retry);
122 124
123 if (retry <= 0) 125 if (retry <= 0)
124 gk20a_warn(dev_from_gk20a(g), 126 nvgpu_warn(g, "priv ringmaster cmd ack too many retries");
125 "priv ringmaster cmd ack too many retries");
126} 127}
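
The priv ring hunk above only reworks the warning at the tail of a bounded ack-poll: the ringmaster command register is re-read until it reports no pending command or the retry budget is exhausted, and only the exhausted case warns. The toy below mirrors that control flow; read_cmd_reg(), CMD_NO_CMD and MAX_RETRIES are stand-ins for the gk20a register accessor and hardware values.

/* Stand-alone sketch of the bounded ack-poll pattern in the ISR above. */
#include <stdio.h>

#define CMD_NO_CMD	0u
#define MAX_RETRIES	32

static unsigned int read_cmd_reg(void)
{
	/* stand-in for the ringmaster command register read; pretend the
	 * ring stays busy for the first few reads */
	static int busy_reads = 3;

	return (busy_reads-- > 0) ? 1u : CMD_NO_CMD;
}

int main(void)
{
	int retry = MAX_RETRIES;
	unsigned int cmd;

	do {
		cmd = read_cmd_reg();
	} while (cmd != CMD_NO_CMD && --retry);

	if (retry <= 0)
		fprintf(stderr, "priv ringmaster cmd ack too many retries\n");
	return 0;
}
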
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index 9fa7514a..b19b16d7 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -25,7 +25,7 @@
25#include "dbg_gpu_gk20a.h" 25#include "dbg_gpu_gk20a.h"
26#include "regops_gk20a.h" 26#include "regops_gk20a.h"
27 27
28 28#include <nvgpu/log.h>
29 29
30static int regop_bsearch_range_cmp(const void *pkey, const void *pelem) 30static int regop_bsearch_range_cmp(const void *pkey, const void *pelem)
31{ 31{
@@ -408,7 +408,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
408 ops, num_ops); 408 ops, num_ops);
409 409
410 if (!ok) { 410 if (!ok) {
411 dev_err(dbg_s->dev, "invalid op(s)"); 411 nvgpu_err(g, "invalid op(s)");
412 err = -EINVAL; 412 err = -EINVAL;
413 /* each op has its own err/status */ 413 /* each op has its own err/status */
414 goto clean_up; 414 goto clean_up;
@@ -527,7 +527,6 @@ static int validate_reg_op_info(struct dbg_session_gk20a *dbg_s,
527 break; 527 break;
528 default: 528 default:
529 op->status |= REGOP(STATUS_UNSUPPORTED_OP); 529 op->status |= REGOP(STATUS_UNSUPPORTED_OP);
530 /*gk20a_err(dbg_s->dev, "Invalid regops op %d!", op->op);*/
531 err = -EINVAL; 530 err = -EINVAL;
532 break; 531 break;
533 } 532 }
@@ -546,7 +545,6 @@ static int validate_reg_op_info(struct dbg_session_gk20a *dbg_s,
546 */ 545 */
547 default: 546 default:
548 op->status |= REGOP(STATUS_INVALID_TYPE); 547 op->status |= REGOP(STATUS_INVALID_TYPE);
549 /*gk20a_err(dbg_s->dev, "Invalid regops type %d!", op->type);*/
550 err = -EINVAL; 548 err = -EINVAL;
551 break; 549 break;
552 } 550 }
@@ -593,7 +591,7 @@ static bool check_whitelists(struct dbg_session_gk20a *dbg_s,
593 } else if (op->type == REGOP(TYPE_GR_CTX)) { 591 } else if (op->type == REGOP(TYPE_GR_CTX)) {
594 /* it's a context-relative op */ 592 /* it's a context-relative op */
595 if (!ch) { 593 if (!ch) {
596 gk20a_err(dbg_s->dev, "can't perform ctx regop unless bound"); 594 nvgpu_err(dbg_s->g, "can't perform ctx regop unless bound");
597 op->status = REGOP(STATUS_UNSUPPORTED_OP); 595 op->status = REGOP(STATUS_UNSUPPORTED_OP);
598 return valid; 596 return valid;
599 } 597 }
@@ -637,7 +635,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
637 635
638 /* support only 24-bit 4-byte aligned offsets */ 636 /* support only 24-bit 4-byte aligned offsets */
639 if (offset & 0xFF000003) { 637 if (offset & 0xFF000003) {
640 gk20a_err(dbg_s->dev, "invalid regop offset: 0x%x\n", offset); 638 nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x\n", offset);
641 op->status |= REGOP(STATUS_INVALID_OFFSET); 639 op->status |= REGOP(STATUS_INVALID_OFFSET);
642 return -EINVAL; 640 return -EINVAL;
643 } 641 }
@@ -675,7 +673,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
675 } 673 }
676 674
677 if (!valid) { 675 if (!valid) {
678 gk20a_err(dbg_s->dev, "invalid regop offset: 0x%x\n", offset); 676 nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x\n", offset);
679 op->status |= REGOP(STATUS_INVALID_OFFSET); 677 op->status |= REGOP(STATUS_INVALID_OFFSET);
680 return -EINVAL; 678 return -EINVAL;
681 } 679 }
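
The regops hunks above also surface the offset rule that the converted error messages report on: a register offset is accepted only if it fits in 24 bits and is 4-byte aligned, which is exactly what the single mask 0xFF000003 tests. A short worked example with illustrative offsets:

/* Sketch of the offset check in validate_reg_op_offset(): the mask
 * rejects anything with bits above bit 23 set (more than 24 bits) or
 * with either of the two low bits set (not 4-byte aligned). */
#include <assert.h>

static int regop_offset_ok(unsigned int offset)
{
	return (offset & 0xFF000003u) == 0;
}

int main(void)
{
	assert(regop_offset_ok(0x00419e44));	/* 24-bit, word aligned: ok */
	assert(!regop_offset_ok(0x00419e46));	/* not 4-byte aligned */
	assert(!regop_offset_ok(0x01000000));	/* needs more than 24 bits */
	return 0;
}
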
diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
index a58de920..1d7fd313 100644
--- a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
@@ -26,6 +26,7 @@
26#include <uapi/linux/nvgpu.h> 26#include <uapi/linux/nvgpu.h>
27 27
28#include <nvgpu/kmem.h> 28#include <nvgpu/kmem.h>
29#include <nvgpu/log.h>
29 30
30#include "ctxsw_trace_gk20a.h" 31#include "ctxsw_trace_gk20a.h"
31#include "gk20a.h" 32#include "gk20a.h"
@@ -330,8 +331,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched,
330 331
331 nvgpu_mutex_acquire(&sched->status_lock); 332 nvgpu_mutex_acquire(&sched->status_lock);
332 if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) { 333 if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
333 gk20a_warn(dev_from_gk20a(g), 334 nvgpu_warn(g, "tsgid=%d already referenced", tsgid);
334 "tsgid=%d already referenced", tsgid);
335 /* unlock status_lock as gk20a_tsg_release locks it */ 335 /* unlock status_lock as gk20a_tsg_release locks it */
336 nvgpu_mutex_release(&sched->status_lock); 336 nvgpu_mutex_release(&sched->status_lock);
337 kref_put(&tsg->refcount, gk20a_tsg_release); 337 kref_put(&tsg->refcount, gk20a_tsg_release);
@@ -363,8 +363,7 @@ static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched,
363 nvgpu_mutex_acquire(&sched->status_lock); 363 nvgpu_mutex_acquire(&sched->status_lock);
364 if (!NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) { 364 if (!NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
365 nvgpu_mutex_release(&sched->status_lock); 365 nvgpu_mutex_release(&sched->status_lock);
366 gk20a_warn(dev_from_gk20a(g), 366 nvgpu_warn(g, "tsgid=%d not previously referenced", tsgid);
367 "tsgid=%d not previously referenced", tsgid);
368 return -ENXIO; 367 return -ENXIO;
369 } 368 }
370 NVGPU_SCHED_CLR(tsgid, sched->ref_tsg_bitmap); 369 NVGPU_SCHED_CLR(tsgid, sched->ref_tsg_bitmap);
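
The sched hunks above warn in two situations: a TSG reference requested twice, and a release for a reference that was never taken. Both are detected from a per-TSG bit in ref_tsg_bitmap. The toy below models only that bookkeeping, using a plain 64-bit word in place of NVGPU_SCHED_ISSET()/NVGPU_SCHED_CLR() and omitting the status_lock serialization the driver uses.

/* Toy model of the reference bookkeeping behind the two warnings.
 * TSG ids 0..63 only, for the sake of the example. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long long ref_tsg_bitmap;

static bool sched_get_tsg(unsigned int tsgid)
{
	if (ref_tsg_bitmap & (1ULL << tsgid)) {
		fprintf(stderr, "tsgid=%u already referenced\n", tsgid);
		return false;
	}
	ref_tsg_bitmap |= 1ULL << tsgid;	/* take the reference */
	return true;
}

static bool sched_put_tsg(unsigned int tsgid)
{
	if (!(ref_tsg_bitmap & (1ULL << tsgid))) {
		fprintf(stderr, "tsgid=%u not previously referenced\n", tsgid);
		return false;
	}
	ref_tsg_bitmap &= ~(1ULL << tsgid);	/* drop the reference */
	return true;
}

int main(void)
{
	sched_get_tsg(3);	/* ok */
	sched_get_tsg(3);	/* warns: already referenced */
	sched_put_tsg(3);	/* ok */
	sched_put_tsg(3);	/* warns: not previously referenced */
	return 0;
}
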
diff --git a/drivers/gpu/nvgpu/gk20a/sim_gk20a.c b/drivers/gpu/nvgpu/gk20a/sim_gk20a.c
index 76d29ee5..8951d5a4 100644
--- a/drivers/gpu/nvgpu/gk20a/sim_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sim_gk20a.c
@@ -20,6 +20,8 @@
20 20
21#include "gk20a.h" 21#include "gk20a.h"
22 22
23#include <nvgpu/log.h>
24
23#include <nvgpu/hw/gk20a/hw_sim_gk20a.h> 25#include <nvgpu/hw/gk20a/hw_sim_gk20a.h>
24 26
25static inline void sim_writel(struct gk20a *g, u32 r, u32 v) 27static inline void sim_writel(struct gk20a *g, u32 r, u32 v)
@@ -65,7 +67,7 @@ static void gk20a_remove_sim_support(struct sim_gk20a *s)
65 gk20a_free_sim_support(g); 67 gk20a_free_sim_support(g);
66} 68}
67 69
68static int alloc_and_kmap_iopage(struct device *d, 70static int alloc_and_kmap_iopage(struct gk20a *g,
69 void **kvaddr, 71 void **kvaddr,
70 u64 *phys, 72 u64 *phys,
71 struct page **page) 73 struct page **page)
@@ -75,14 +77,14 @@ static int alloc_and_kmap_iopage(struct device *d,
75 77
76 if (!*page) { 78 if (!*page) {
77 err = -ENOMEM; 79 err = -ENOMEM;
78 dev_err(d, "couldn't allocate io page\n"); 80 nvgpu_err(g, "couldn't allocate io page\n");
79 goto fail; 81 goto fail;
80 } 82 }
81 83
82 *kvaddr = kmap(*page); 84 *kvaddr = kmap(*page);
83 if (!*kvaddr) { 85 if (!*kvaddr) {
84 err = -ENOMEM; 86 err = -ENOMEM;
85 dev_err(d, "couldn't kmap io page\n"); 87 nvgpu_err(g, "couldn't kmap io page\n");
86 goto fail; 88 goto fail;
87 } 89 }
88 *phys = page_to_phys(*page); 90 *phys = page_to_phys(*page);
@@ -105,27 +107,27 @@ int gk20a_init_sim_support(struct platform_device *pdev)
105 g->sim.regs = gk20a_ioremap_resource(pdev, GK20A_SIM_IORESOURCE_MEM, 107 g->sim.regs = gk20a_ioremap_resource(pdev, GK20A_SIM_IORESOURCE_MEM,
106 &g->sim.reg_mem); 108 &g->sim.reg_mem);
107 if (IS_ERR(g->sim.regs)) { 109 if (IS_ERR(g->sim.regs)) {
108 dev_err(dev, "failed to remap gk20a sim regs\n"); 110 nvgpu_err(g, "failed to remap gk20a sim regs\n");
109 err = PTR_ERR(g->sim.regs); 111 err = PTR_ERR(g->sim.regs);
110 goto fail; 112 goto fail;
111 } 113 }
112 114
113 /* allocate sim event/msg buffers */ 115 /* allocate sim event/msg buffers */
114 err = alloc_and_kmap_iopage(dev, &g->sim.send_bfr.kvaddr, 116 err = alloc_and_kmap_iopage(g, &g->sim.send_bfr.kvaddr,
115 &g->sim.send_bfr.phys, 117 &g->sim.send_bfr.phys,
116 &g->sim.send_bfr.page); 118 &g->sim.send_bfr.page);
117 119
118 err = err || alloc_and_kmap_iopage(dev, &g->sim.recv_bfr.kvaddr, 120 err = err || alloc_and_kmap_iopage(g, &g->sim.recv_bfr.kvaddr,
119 &g->sim.recv_bfr.phys, 121 &g->sim.recv_bfr.phys,
120 &g->sim.recv_bfr.page); 122 &g->sim.recv_bfr.page);
121 123
122 err = err || alloc_and_kmap_iopage(dev, &g->sim.msg_bfr.kvaddr, 124 err = err || alloc_and_kmap_iopage(g, &g->sim.msg_bfr.kvaddr,
123 &g->sim.msg_bfr.phys, 125 &g->sim.msg_bfr.phys,
124 &g->sim.msg_bfr.page); 126 &g->sim.msg_bfr.page);
125 127
126 if (!(g->sim.send_bfr.kvaddr && g->sim.recv_bfr.kvaddr && 128 if (!(g->sim.send_bfr.kvaddr && g->sim.recv_bfr.kvaddr &&
127 g->sim.msg_bfr.kvaddr)) { 129 g->sim.msg_bfr.kvaddr)) {
128 dev_err(dev, "couldn't allocate all sim buffers\n"); 130 nvgpu_err(g, "couldn't allocate all sim buffers\n");
129 goto fail; 131 goto fail;
130 } 132 }
131 133
@@ -275,7 +277,7 @@ static int rpc_recv_poll(struct gk20a *g)
275 (u64)recv_phys_addr_lo << PAGE_SHIFT; 277 (u64)recv_phys_addr_lo << PAGE_SHIFT;
276 278
277 if (recv_phys_addr != g->sim.msg_bfr.phys) { 279 if (recv_phys_addr != g->sim.msg_bfr.phys) {
278 dev_err(dev_from_gk20a(g), "%s Error in RPC reply\n", 280 nvgpu_err(g, "%s Error in RPC reply\n",
279 __func__); 281 __func__);
280 return -1; 282 return -1;
281 } 283 }
@@ -302,21 +304,21 @@ static int issue_rpc_and_wait(struct gk20a *g)
302 304
303 err = rpc_send_message(g); 305 err = rpc_send_message(g);
304 if (err) { 306 if (err) {
305 dev_err(dev_from_gk20a(g), "%s failed rpc_send_message\n", 307 nvgpu_err(g, "%s failed rpc_send_message\n",
306 __func__); 308 __func__);
307 return err; 309 return err;
308 } 310 }
309 311
310 err = rpc_recv_poll(g); 312 err = rpc_recv_poll(g);
311 if (err) { 313 if (err) {
312 dev_err(dev_from_gk20a(g), "%s failed rpc_recv_poll\n", 314 nvgpu_err(g, "%s failed rpc_recv_poll\n",
313 __func__); 315 __func__);
314 return err; 316 return err;
315 } 317 }
316 318
317 /* Now check if RPC really succeeded */ 319 /* Now check if RPC really succeeded */
318 if (*sim_msg_hdr(g, sim_msg_result_r()) != sim_msg_result_success_v()) { 320 if (*sim_msg_hdr(g, sim_msg_result_r()) != sim_msg_result_success_v()) {
319 dev_err(dev_from_gk20a(g), "%s received failed status!\n", 321 nvgpu_err(g, "%s received failed status!\n",
320 __func__); 322 __func__);
321 return -(*sim_msg_hdr(g, sim_msg_result_r())); 323 return -(*sim_msg_hdr(g, sim_msg_result_r()));
322 } 324 }
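
In the sim hunks above, rpc_recv_poll() reconstructs a physical address from the page frame number returned by the simulator and compares it with the address recorded when the message buffer was allocated; a mismatch is reported as a bad RPC reply. The sketch below shows that round trip for a page-aligned address; the PAGE_SHIFT value and the example address are assumptions for illustration.

/* Sketch of the reply-buffer check in rpc_recv_poll() above. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumption: 4 KiB pages */

int main(void)
{
	uint64_t msg_bfr_phys = 0x00000000fe543000ULL;	/* example value */
	/* the simulator hands back the page frame number... */
	uint32_t recv_phys_addr_lo = (uint32_t)(msg_bfr_phys >> PAGE_SHIFT);
	/* ...which is shifted back into a physical address */
	uint64_t recv_phys_addr = (uint64_t)recv_phys_addr_lo << PAGE_SHIFT;

	if (recv_phys_addr != msg_bfr_phys)
		fprintf(stderr, "Error in RPC reply\n");
	else
		printf("reply buffer matches: 0x%llx\n",
		       (unsigned long long)recv_phys_addr);
	return 0;
}
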
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 6281e4ad..5f07ade7 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <nvgpu/kmem.h> 17#include <nvgpu/kmem.h>
18#include <nvgpu/log.h>
18 19
19#include "gk20a.h" 20#include "gk20a.h"
20#include "tsg_gk20a.h" 21#include "tsg_gk20a.h"
@@ -93,7 +94,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
93 if (tsg->runlist_id == FIFO_INVAL_TSG_ID) 94 if (tsg->runlist_id == FIFO_INVAL_TSG_ID)
94 tsg->runlist_id = ch->runlist_id; 95 tsg->runlist_id = ch->runlist_id;
95 else if (tsg->runlist_id != ch->runlist_id) { 96 else if (tsg->runlist_id != ch->runlist_id) {
96 gk20a_err(dev_from_gk20a(tsg->g), 97 nvgpu_err(tsg->g,
97 "Error: TSG channel should be share same runlist ch[%d] tsg[%d]\n", 98 "Error: TSG channel should be share same runlist ch[%d] tsg[%d]\n",
98 ch->runlist_id, tsg->runlist_id); 99 ch->runlist_id, tsg->runlist_id);
99 return -EINVAL; 100 return -EINVAL;
@@ -260,8 +261,7 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g)
260 if (g->ops.fifo.tsg_open) { 261 if (g->ops.fifo.tsg_open) {
261 err = g->ops.fifo.tsg_open(tsg); 262 err = g->ops.fifo.tsg_open(tsg);
262 if (err) { 263 if (err) {
263 gk20a_err(dev_from_gk20a(g), 264 nvgpu_err(g, "tsg %d fifo open failed %d",
264 "tsg %d fifo open failed %d",
265 tsg->tsgid, err); 265 tsg->tsgid, err);
266 goto clean_up; 266 goto clean_up;
267 } 267 }