Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/cde_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/cde_gk20a.c | 109
1 file changed, 58 insertions(+), 51 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index 16baaa39..296a8af0 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -28,6 +28,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/nvgpu_common.h>
 #include <nvgpu/kmem.h>
+#include <nvgpu/log.h>
 
 #include "gk20a.h"
 #include "channel_gk20a.h"
@@ -228,19 +229,20 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
 			      struct gk20a_cde_hdr_buf *buf)
 {
 	struct nvgpu_mem *mem;
+	struct gk20a *g = cde_ctx->g;
 	int err;
 
 	/* check that the file can hold the buf */
 	if (buf->data_byte_offset != 0 &&
 	    buf->data_byte_offset + buf->num_bytes > img->size) {
-		gk20a_warn(cde_ctx->dev, "cde: invalid data section. buffer idx = %d",
+		nvgpu_warn(g, "cde: invalid data section. buffer idx = %d",
 			   cde_ctx->num_bufs);
 		return -EINVAL;
 	}
 
 	/* check that we have enough buf elems available */
 	if (cde_ctx->num_bufs >= MAX_CDE_BUFS) {
-		gk20a_warn(cde_ctx->dev, "cde: invalid data section. buffer idx = %d",
+		nvgpu_warn(g, "cde: invalid data section. buffer idx = %d",
 			   cde_ctx->num_bufs);
 		return -ENOMEM;
 	}
@@ -249,7 +251,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
 	mem = cde_ctx->mem + cde_ctx->num_bufs;
 	err = nvgpu_dma_alloc_map_sys(cde_ctx->vm, buf->num_bytes, mem);
 	if (err) {
-		gk20a_warn(cde_ctx->dev, "cde: could not allocate device memory. buffer idx = %d",
+		nvgpu_warn(g, "cde: could not allocate device memory. buffer idx = %d",
 			   cde_ctx->num_bufs);
 		return -ENOMEM;
 	}
@@ -267,6 +269,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
 static int gk20a_replace_data(struct gk20a_cde_ctx *cde_ctx, void *target,
 			      int type, s32 shift, u64 mask, u64 value)
 {
+	struct gk20a *g = cde_ctx->g;
 	u32 *target_mem_ptr = target;
 	u64 *target_mem_ptr_u64 = target;
 	u64 current_value, new_value;
@@ -287,7 +290,7 @@ static int gk20a_replace_data(struct gk20a_cde_ctx *cde_ctx, void *target,
 		current_value = (u64)(current_value >> 32) |
 			(u64)(current_value << 32);
 	} else {
-		gk20a_warn(cde_ctx->dev, "cde: unknown type. type=%d",
+		nvgpu_warn(g, "cde: unknown type. type=%d",
 			   type);
 		return -EINVAL;
 	}
@@ -315,13 +318,14 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
 {
 	struct nvgpu_mem *source_mem;
 	struct nvgpu_mem *target_mem;
+	struct gk20a *g = cde_ctx->g;
 	u32 *target_mem_ptr;
 	u64 vaddr;
 	int err;
 
 	if (replace->target_buf >= cde_ctx->num_bufs ||
 	    replace->source_buf >= cde_ctx->num_bufs) {
-		gk20a_warn(cde_ctx->dev, "cde: invalid buffer. target_buf=%u, source_buf=%u, num_bufs=%d",
+		nvgpu_warn(g, "cde: invalid buffer. target_buf=%u, source_buf=%u, num_bufs=%d",
 			   replace->target_buf, replace->source_buf,
 			   cde_ctx->num_bufs);
 		return -EINVAL;
@@ -333,7 +337,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
 
 	if (source_mem->size < (replace->source_byte_offset + 3) ||
 	    target_mem->size < (replace->target_byte_offset + 3)) {
-		gk20a_warn(cde_ctx->dev, "cde: invalid buffer offsets. target_buf_offs=%lld, source_buf_offs=%lld, source_buf_size=%zu, dest_buf_size=%zu",
+		nvgpu_warn(g, "cde: invalid buffer offsets. target_buf_offs=%lld, source_buf_offs=%lld, source_buf_size=%zu, dest_buf_size=%zu",
 			   replace->target_byte_offset,
 			   replace->source_byte_offset,
 			   source_mem->size,
@@ -350,7 +354,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
 				 replace->shift, replace->mask,
 				 vaddr);
 	if (err) {
-		gk20a_warn(cde_ctx->dev, "cde: replace failed. err=%d, target_buf=%u, target_buf_offs=%lld, source_buf=%u, source_buf_offs=%lld",
+		nvgpu_warn(g, "cde: replace failed. err=%d, target_buf=%u, target_buf_offs=%lld, source_buf=%u, source_buf_offs=%lld",
 			   err, replace->target_buf,
 			   replace->target_byte_offset,
 			   replace->source_buf,
@@ -438,7 +442,7 @@ static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx)
 					 param->shift, param->mask, new_data);
 
 		if (err) {
-			gk20a_warn(cde_ctx->dev, "cde: patch failed. err=%d, idx=%d, id=%d, target_buf=%u, target_buf_offs=%lld, patch_value=%llu",
+			nvgpu_warn(g, "cde: patch failed. err=%d, idx=%d, id=%d, target_buf=%u, target_buf_offs=%lld, patch_value=%llu",
 				   err, i, param->id, param->target_buf,
 				   param->target_byte_offset, new_data);
 			return err;
@@ -453,9 +457,10 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
 			       struct gk20a_cde_hdr_param *param)
 {
 	struct nvgpu_mem *target_mem;
+	struct gk20a *g = cde_ctx->g;
 
 	if (param->target_buf >= cde_ctx->num_bufs) {
-		gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. param idx = %d, target_buf=%u, num_bufs=%u",
+		nvgpu_warn(g, "cde: invalid buffer parameter. param idx = %d, target_buf=%u, num_bufs=%u",
 			   cde_ctx->num_params, param->target_buf,
 			   cde_ctx->num_bufs);
 		return -EINVAL;
@@ -463,7 +468,7 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
 
 	target_mem = cde_ctx->mem + param->target_buf;
 	if (target_mem->size < (param->target_byte_offset + 3)) {
-		gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. param idx = %d, target_buf_offs=%lld, target_buf_size=%zu",
+		nvgpu_warn(g, "cde: invalid buffer parameter. param idx = %d, target_buf_offs=%lld, target_buf_size=%zu",
 			   cde_ctx->num_params, param->target_byte_offset,
 			   target_mem->size);
 		return -EINVAL;
@@ -471,14 +476,14 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
 
 	/* does this parameter fit into our parameter structure */
 	if (cde_ctx->num_params >= MAX_CDE_PARAMS) {
-		gk20a_warn(cde_ctx->dev, "cde: no room for new parameters param idx = %d",
+		nvgpu_warn(g, "cde: no room for new parameters param idx = %d",
 			   cde_ctx->num_params);
 		return -ENOMEM;
 	}
 
 	/* is the given id valid? */
 	if (param->id >= NUM_RESERVED_PARAMS + MAX_CDE_USER_PARAMS) {
-		gk20a_warn(cde_ctx->dev, "cde: parameter id is not valid. param idx = %d, id=%u, max=%u",
+		nvgpu_warn(g, "cde: parameter id is not valid. param idx = %d, id=%u, max=%u",
 			   param->id, cde_ctx->num_params,
 			   NUM_RESERVED_PARAMS + MAX_CDE_USER_PARAMS);
 		return -EINVAL;
@@ -494,6 +499,7 @@ static int gk20a_init_cde_required_class(struct gk20a_cde_ctx *cde_ctx,
 					  const struct firmware *img,
 					  u32 required_class)
 {
+	struct gk20a *g = cde_ctx->g;
 	struct nvgpu_alloc_obj_ctx_args alloc_obj_ctx;
 	int err;
 
@@ -505,7 +511,7 @@ static int gk20a_init_cde_required_class(struct gk20a_cde_ctx *cde_ctx,
 
 	err = gk20a_alloc_obj_ctx(cde_ctx->ch, &alloc_obj_ctx);
 	if (err) {
-		gk20a_warn(cde_ctx->dev, "cde: failed to allocate ctx. err=%d",
+		nvgpu_warn(g, "cde: failed to allocate ctx. err=%d",
 			   err);
 		return err;
 	}
@@ -519,6 +525,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
 				  struct gk20a_cde_cmd_elem *cmd_elem,
 				  u32 num_elems)
 {
+	struct gk20a *g = cde_ctx->g;
 	struct nvgpu_gpfifo **gpfifo, *gpfifo_elem;
 	u32 *num_entries;
 	unsigned int i;
@@ -531,7 +538,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
 		gpfifo = &cde_ctx->convert_cmd;
 		num_entries = &cde_ctx->convert_cmd_num_entries;
 	} else {
-		gk20a_warn(cde_ctx->dev, "cde: unknown command. op=%u",
+		nvgpu_warn(g, "cde: unknown command. op=%u",
 			   op);
 		return -EINVAL;
 	}
@@ -540,7 +547,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
 	*gpfifo = nvgpu_kzalloc(cde_ctx->g,
 				sizeof(struct nvgpu_gpfifo) * num_elems);
 	if (!*gpfifo) {
-		gk20a_warn(cde_ctx->dev, "cde: could not allocate memory for gpfifo entries");
+		nvgpu_warn(g, "cde: could not allocate memory for gpfifo entries");
 		return -ENOMEM;
 	}
 
@@ -550,7 +557,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
 
 		/* validate the current entry */
 		if (cmd_elem->target_buf >= cde_ctx->num_bufs) {
-			gk20a_warn(cde_ctx->dev, "cde: target buffer is not available (target=%u, num_bufs=%u)",
+			nvgpu_warn(g, "cde: target buffer is not available (target=%u, num_bufs=%u)",
 				   cmd_elem->target_buf, cde_ctx->num_bufs);
 			return -EINVAL;
 		}
@@ -558,7 +565,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
 		target_mem = cde_ctx->mem + cmd_elem->target_buf;
 		if (target_mem->size <
 		    cmd_elem->target_byte_offset + cmd_elem->num_bytes) {
-			gk20a_warn(cde_ctx->dev, "cde: target buffer cannot hold all entries (target_size=%zu, target_byte_offset=%lld, num_bytes=%llu)",
+			nvgpu_warn(g, "cde: target buffer cannot hold all entries (target_size=%zu, target_byte_offset=%lld, num_bytes=%llu)",
 				   target_mem->size,
 				   cmd_elem->target_byte_offset,
 				   cmd_elem->num_bytes);
@@ -582,6 +589,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
 
 static int gk20a_cde_pack_cmdbufs(struct gk20a_cde_ctx *cde_ctx)
 {
+	struct gk20a *g = cde_ctx->g;
 	unsigned long init_bytes = cde_ctx->init_cmd_num_entries *
 		sizeof(struct nvgpu_gpfifo);
 	unsigned long conv_bytes = cde_ctx->convert_cmd_num_entries *
@@ -592,8 +600,8 @@ static int gk20a_cde_pack_cmdbufs(struct gk20a_cde_ctx *cde_ctx)
 	/* allocate buffer that has space for both */
 	combined_cmd = nvgpu_kzalloc(cde_ctx->g, total_bytes);
 	if (!combined_cmd) {
-		gk20a_warn(cde_ctx->dev,
-			   "cde: could not allocate memory for gpfifo entries");
+		nvgpu_warn(g,
+			   "cde: could not allocate memory for gpfifo entries");
 		return -ENOMEM;
 	}
 
@@ -615,6 +623,7 @@ static int gk20a_cde_pack_cmdbufs(struct gk20a_cde_ctx *cde_ctx)
 static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
 			      const struct firmware *img)
 {
+	struct gk20a *g = cde_ctx->g;
 	struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app;
 	u32 *data = (u32 *)img->data;
 	u32 num_of_elems;
@@ -625,7 +634,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
 
 	min_size += 2 * sizeof(u32);
 	if (img->size < min_size) {
-		gk20a_warn(cde_ctx->dev, "cde: invalid image header");
+		nvgpu_warn(g, "cde: invalid image header");
 		return -EINVAL;
 	}
 
@@ -634,7 +643,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
 
 	min_size += num_of_elems * sizeof(*elem);
 	if (img->size < min_size) {
-		gk20a_warn(cde_ctx->dev, "cde: bad image");
+		nvgpu_warn(g, "cde: bad image");
 		return -EINVAL;
 	}
 
@@ -671,7 +680,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
 			       MAX_CDE_ARRAY_ENTRIES*sizeof(u32));
 			break;
 		default:
-			gk20a_warn(cde_ctx->dev, "cde: unknown header element");
+			nvgpu_warn(g, "cde: unknown header element");
 			err = -EINVAL;
 		}
 
@@ -682,13 +691,13 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
 	}
 
 	if (!cde_ctx->init_convert_cmd || !cde_ctx->init_cmd_num_entries) {
-		gk20a_warn(cde_ctx->dev, "cde: convert command not defined");
+		nvgpu_warn(g, "cde: convert command not defined");
 		err = -EINVAL;
 		goto deinit_image;
 	}
 
 	if (!cde_ctx->convert_cmd || !cde_ctx->convert_cmd_num_entries) {
-		gk20a_warn(cde_ctx->dev, "cde: convert command not defined");
+		nvgpu_warn(g, "cde: convert command not defined");
 		err = -EINVAL;
 		goto deinit_image;
 	}
@@ -708,6 +717,7 @@ static int gk20a_cde_execute_buffer(struct gk20a_cde_ctx *cde_ctx,
 				    u32 op, struct nvgpu_fence *fence,
 				    u32 flags, struct gk20a_fence **fence_out)
 {
+	struct gk20a *g = cde_ctx->g;
 	struct nvgpu_gpfifo *gpfifo = NULL;
 	int num_entries = 0;
 
@@ -721,12 +731,12 @@ static int gk20a_cde_execute_buffer(struct gk20a_cde_ctx *cde_ctx,
 		gpfifo = cde_ctx->convert_cmd;
 		num_entries = cde_ctx->convert_cmd_num_entries;
 	} else {
-		gk20a_warn(cde_ctx->dev, "cde: unknown buffer");
+		nvgpu_warn(g, "cde: unknown buffer");
 		return -EINVAL;
 	}
 
 	if (gpfifo == NULL || num_entries == 0) {
-		gk20a_warn(cde_ctx->dev, "cde: buffer not available");
+		nvgpu_warn(g, "cde: buffer not available");
 		return -ENOSYS;
 	}
 
@@ -765,7 +775,6 @@ __releases(&cde_app->mutex)
 	struct gk20a_cde_ctx *cde_ctx = container_of(delay_work,
 			struct gk20a_cde_ctx, ctx_deleter_work);
 	struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app;
-	struct device *dev = cde_ctx->dev;
 	struct gk20a *g = cde_ctx->g;
 	int err;
 
@@ -780,7 +789,7 @@ __releases(&cde_app->mutex)
 	if (err) {
 		/* this context would find new use anyway later, so not freeing
 		 * here does not leak anything */
-		gk20a_warn(dev, "cde: cannot set gk20a on, postponing"
+		nvgpu_warn(g, "cde: cannot set gk20a on, postponing"
 			   " temp ctx deletion");
 		return;
 	}
@@ -848,7 +857,7 @@ __must_hold(&cde_app->mutex)
 
 	cde_ctx = gk20a_cde_allocate_context(g);
 	if (IS_ERR(cde_ctx)) {
-		gk20a_warn(g->dev, "cde: cannot allocate context: %ld",
+		nvgpu_warn(g, "cde: cannot allocate context: %ld",
 			   PTR_ERR(cde_ctx));
 		return cde_ctx;
 	}
@@ -1023,7 +1032,7 @@ __releases(&cde_app->mutex)
 
 	surface = dma_buf_vmap(compbits_scatter_buf);
 	if (IS_ERR(surface)) {
-		gk20a_warn(g->dev,
+		nvgpu_warn(g,
 			   "dma_buf_vmap failed");
 		err = -EINVAL;
 		goto exit_unmap_vaddr;
@@ -1035,7 +1044,7 @@ __releases(&cde_app->mutex)
 		  surface, scatter_buffer);
 	sgt = gk20a_mm_pin(g->dev, compbits_scatter_buf);
 	if (IS_ERR(sgt)) {
-		gk20a_warn(g->dev,
+		nvgpu_warn(g,
 			   "mm_pin failed");
 		err = -EINVAL;
 		goto exit_unmap_surface;
@@ -1083,7 +1092,7 @@ __releases(&cde_app->mutex)
 		int id = param->id - NUM_RESERVED_PARAMS;
 
 		if (id < 0 || id >= MAX_CDE_USER_PARAMS) {
-			gk20a_warn(cde_ctx->dev, "cde: unknown user parameter");
+			nvgpu_warn(g, "cde: unknown user parameter");
 			err = -EINVAL;
 			goto exit_unmap_surface;
 		}
@@ -1093,7 +1102,7 @@ __releases(&cde_app->mutex)
 	/* patch data */
 	err = gk20a_cde_patch_params(cde_ctx);
 	if (err) {
-		gk20a_warn(cde_ctx->dev, "cde: failed to patch parameters");
+		nvgpu_warn(g, "cde: failed to patch parameters");
 		goto exit_unmap_surface;
 	}
 
@@ -1160,20 +1169,19 @@ __releases(&cde_app->mutex)
 
 	if (ch->has_timedout) {
 		if (cde_ctx->is_temporary) {
-			gk20a_warn(cde_ctx->dev,
+			nvgpu_warn(g,
 				   "cde: channel had timed out"
 				   " (temporary channel)");
 			/* going to be deleted anyway */
 		} else {
-			gk20a_warn(cde_ctx->dev,
+			nvgpu_warn(g,
 				   "cde: channel had timed out"
 				   ", reloading");
 			/* mark it to be deleted, replace with a new one */
 			nvgpu_mutex_acquire(&cde_app->mutex);
 			cde_ctx->is_temporary = true;
 			if (gk20a_cde_create_context(g)) {
-				gk20a_err(cde_ctx->dev,
-					  "cde: can't replace context");
+				nvgpu_err(g, "cde: can't replace context");
 			}
 			nvgpu_mutex_release(&cde_app->mutex);
 		}
@@ -1201,7 +1209,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 
 	img = nvgpu_request_firmware(g, "gpu2cde.bin", 0);
 	if (!img) {
-		dev_err(cde_ctx->dev, "cde: could not fetch the firmware");
+		nvgpu_err(g, "cde: could not fetch the firmware");
 		return -ENOSYS;
 	}
 
@@ -1210,7 +1218,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 					-1,
 					false);
 	if (!ch) {
-		gk20a_warn(cde_ctx->dev, "cde: gk20a channel not available");
+		nvgpu_warn(g, "cde: gk20a channel not available");
 		err = -ENOMEM;
 		goto err_get_gk20a_channel;
 	}
@@ -1218,14 +1226,14 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 	/* bind the channel to the vm */
 	err = __gk20a_vm_bind_channel(&g->mm.cde.vm, ch);
 	if (err) {
-		gk20a_warn(cde_ctx->dev, "cde: could not bind vm");
+		nvgpu_warn(g, "cde: could not bind vm");
 		goto err_commit_va;
 	}
 
 	/* allocate gpfifo (1024 should be more than enough) */
 	err = gk20a_channel_alloc_gpfifo(ch, 1024, 0, 0);
 	if (err) {
-		gk20a_warn(cde_ctx->dev, "cde: unable to allocate gpfifo");
+		nvgpu_warn(g, "cde: unable to allocate gpfifo");
 		goto err_alloc_gpfifo;
 	}
 
@@ -1238,7 +1246,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 				 gr->compbit_store.mem.aperture);
 
 	if (!vaddr) {
-		gk20a_warn(cde_ctx->dev, "cde: cannot map compression bit backing store");
+		nvgpu_warn(g, "cde: cannot map compression bit backing store");
 		err = -ENOMEM;
 		goto err_map_backingstore;
 	}
@@ -1251,7 +1259,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 	/* initialise the firmware */
 	err = gk20a_init_cde_img(cde_ctx, img);
 	if (err) {
-		gk20a_warn(cde_ctx->dev, "cde: image initialisation failed");
+		nvgpu_warn(g, "cde: image initialisation failed");
 		goto err_init_cde_img;
 	}
 
@@ -1268,8 +1276,7 @@ err_alloc_gpfifo:
 err_commit_va:
 err_get_gk20a_channel:
 	release_firmware(img);
-	dev_err(cde_ctx->dev, "cde: couldn't initialise buffer converter: %d",
-		err);
+	nvgpu_err(g, "cde: couldn't initialise buffer converter: %d", err);
 	return err;
 }
 
@@ -1413,17 +1420,17 @@ static int gk20a_buffer_convert_gpu_to_cde_v1(
 		g->ops.cde.get_program_numbers(g, block_height_log2,
 					       &hprog, &vprog);
 	else {
-		gk20a_warn(g->dev, "cde: chip not supported");
+		nvgpu_warn(g, "cde: chip not supported");
 		return -ENOSYS;
 	}
 
 	if (hprog < 0 || vprog < 0) {
-		gk20a_warn(g->dev, "cde: could not determine programs");
+		nvgpu_warn(g, "cde: could not determine programs");
 		return -ENOSYS;
 	}
 
 	if (xtiles > 8192 / 8 || ytiles > 8192 / 8)
-		gk20a_warn(g->dev, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)",
+		nvgpu_warn(g, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)",
 			   xtiles, ytiles);
 
 	gk20a_dbg(gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx",
@@ -1541,7 +1548,7 @@ static int gk20a_buffer_convert_gpu_to_cde(
 				width, height, block_height_log2,
 				submit_flags, fence_in, state);
 	} else {
-		dev_err(dev_from_gk20a(g), "unsupported CDE firmware version %d",
+		nvgpu_err(g, "unsupported CDE firmware version %d",
 			g->cde_app.firmware_version);
 		err = -EINVAL;
 	}
@@ -1628,13 +1635,13 @@ int gk20a_mark_compressible_write(struct gk20a *g, u32 buffer_fd,
 
 	dmabuf = dma_buf_get(buffer_fd);
 	if (IS_ERR(dmabuf)) {
-		dev_err(dev_from_gk20a(g), "invalid dmabuf");
+		nvgpu_err(g, "invalid dmabuf");
 		return -EINVAL;
 	}
 
 	err = gk20a_dmabuf_get_state(dmabuf, dev_from_gk20a(g), offset, &state);
 	if (err) {
-		dev_err(dev_from_gk20a(g), "could not get state from dmabuf");
+		nvgpu_err(g, "could not get state from dmabuf");
 		dma_buf_put(dmabuf);
 		return err;
 	}