summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
diff options
context:
space:
mode:
authorTerje Bergstrom <tbergstrom@nvidia.com>2016-03-29 19:02:34 -0400
committerTerje Bergstrom <tbergstrom@nvidia.com>2016-04-08 12:42:41 -0400
commite8bac374c0ed24f05bf389e1e8b5aca47f61bd3a (patch)
tree36b6e111f8706c0560ee552bc9d6e15b87fc9621 /drivers/gpu/nvgpu/gk20a/cde_gk20a.c
parent2382a8433fddaee3baecff6ae941944850787ab7 (diff)
gpu: nvgpu: Use device instead of platform_device
Use struct device instead of struct platform_device wherever possible. This allows adding other bus types later. Change-Id: I1657287a68d85a542cdbdd8a00d1902c3d6e00ed Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-on: http://git-master/r/1120466
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/cde_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/cde_gk20a.c110
1 file changed, 55 insertions, 55 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index a2f7e7a4..7818f046 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Color decompression engine support 2 * Color decompression engine support
3 * 3 *
4 * Copyright (c) 2014-2015, NVIDIA Corporation. All rights reserved. 4 * Copyright (c) 2014-2016, NVIDIA Corporation. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -227,14 +227,14 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
227 /* check that the file can hold the buf */ 227 /* check that the file can hold the buf */
228 if (buf->data_byte_offset != 0 && 228 if (buf->data_byte_offset != 0 &&
229 buf->data_byte_offset + buf->num_bytes > img->size) { 229 buf->data_byte_offset + buf->num_bytes > img->size) {
230 gk20a_warn(&cde_ctx->pdev->dev, "cde: invalid data section. buffer idx = %d", 230 gk20a_warn(cde_ctx->dev, "cde: invalid data section. buffer idx = %d",
231 cde_ctx->num_bufs); 231 cde_ctx->num_bufs);
232 return -EINVAL; 232 return -EINVAL;
233 } 233 }
234 234
235 /* check that we have enough buf elems available */ 235 /* check that we have enough buf elems available */
236 if (cde_ctx->num_bufs >= MAX_CDE_BUFS) { 236 if (cde_ctx->num_bufs >= MAX_CDE_BUFS) {
237 gk20a_warn(&cde_ctx->pdev->dev, "cde: invalid data section. buffer idx = %d", 237 gk20a_warn(cde_ctx->dev, "cde: invalid data section. buffer idx = %d",
238 cde_ctx->num_bufs); 238 cde_ctx->num_bufs);
239 return -ENOMEM; 239 return -ENOMEM;
240 } 240 }
@@ -243,7 +243,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
243 mem = cde_ctx->mem + cde_ctx->num_bufs; 243 mem = cde_ctx->mem + cde_ctx->num_bufs;
244 err = gk20a_gmmu_alloc_map(cde_ctx->vm, buf->num_bytes, mem); 244 err = gk20a_gmmu_alloc_map(cde_ctx->vm, buf->num_bytes, mem);
245 if (err) { 245 if (err) {
246 gk20a_warn(&cde_ctx->pdev->dev, "cde: could not allocate device memory. buffer idx = %d", 246 gk20a_warn(cde_ctx->dev, "cde: could not allocate device memory. buffer idx = %d",
247 cde_ctx->num_bufs); 247 cde_ctx->num_bufs);
248 return -ENOMEM; 248 return -ENOMEM;
249 } 249 }
@@ -281,7 +281,7 @@ static int gk20a_replace_data(struct gk20a_cde_ctx *cde_ctx, void *target,
281 current_value = (u64)(current_value >> 32) | 281 current_value = (u64)(current_value >> 32) |
282 (u64)(current_value << 32); 282 (u64)(current_value << 32);
283 } else { 283 } else {
284 gk20a_warn(&cde_ctx->pdev->dev, "cde: unknown type. type=%d", 284 gk20a_warn(cde_ctx->dev, "cde: unknown type. type=%d",
285 type); 285 type);
286 return -EINVAL; 286 return -EINVAL;
287 } 287 }
@@ -315,7 +315,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
315 315
316 if (replace->target_buf >= cde_ctx->num_bufs || 316 if (replace->target_buf >= cde_ctx->num_bufs ||
317 replace->source_buf >= cde_ctx->num_bufs) { 317 replace->source_buf >= cde_ctx->num_bufs) {
318 gk20a_warn(&cde_ctx->pdev->dev, "cde: invalid buffer. target_buf=%u, source_buf=%u, num_bufs=%d", 318 gk20a_warn(cde_ctx->dev, "cde: invalid buffer. target_buf=%u, source_buf=%u, num_bufs=%d",
319 replace->target_buf, replace->source_buf, 319 replace->target_buf, replace->source_buf,
320 cde_ctx->num_bufs); 320 cde_ctx->num_bufs);
321 return -EINVAL; 321 return -EINVAL;
@@ -327,7 +327,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
327 327
328 if (source_mem->size < (replace->source_byte_offset + 3) || 328 if (source_mem->size < (replace->source_byte_offset + 3) ||
329 target_mem->size < (replace->target_byte_offset + 3)) { 329 target_mem->size < (replace->target_byte_offset + 3)) {
330 gk20a_warn(&cde_ctx->pdev->dev, "cde: invalid buffer offsets. target_buf_offs=%lld, source_buf_offs=%lld, source_buf_size=%zu, dest_buf_size=%zu", 330 gk20a_warn(cde_ctx->dev, "cde: invalid buffer offsets. target_buf_offs=%lld, source_buf_offs=%lld, source_buf_size=%zu, dest_buf_size=%zu",
331 replace->target_byte_offset, 331 replace->target_byte_offset,
332 replace->source_byte_offset, 332 replace->source_byte_offset,
333 source_mem->size, 333 source_mem->size,
@@ -344,7 +344,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx,
344 replace->shift, replace->mask, 344 replace->shift, replace->mask,
345 vaddr); 345 vaddr);
346 if (err) { 346 if (err) {
347 gk20a_warn(&cde_ctx->pdev->dev, "cde: replace failed. err=%d, target_buf=%u, target_buf_offs=%lld, source_buf=%u, source_buf_offs=%lld", 347 gk20a_warn(cde_ctx->dev, "cde: replace failed. err=%d, target_buf=%u, target_buf_offs=%lld, source_buf=%u, source_buf_offs=%lld",
348 err, replace->target_buf, 348 err, replace->target_buf,
349 replace->target_byte_offset, 349 replace->target_byte_offset,
350 replace->source_buf, 350 replace->source_buf,
@@ -431,7 +431,7 @@ static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx)
431 param->shift, param->mask, new_data); 431 param->shift, param->mask, new_data);
432 432
433 if (err) { 433 if (err) {
434 gk20a_warn(&cde_ctx->pdev->dev, "cde: patch failed. err=%d, idx=%d, id=%d, target_buf=%u, target_buf_offs=%lld, patch_value=%llu", 434 gk20a_warn(cde_ctx->dev, "cde: patch failed. err=%d, idx=%d, id=%d, target_buf=%u, target_buf_offs=%lld, patch_value=%llu",
435 err, i, param->id, param->target_buf, 435 err, i, param->id, param->target_buf,
436 param->target_byte_offset, new_data); 436 param->target_byte_offset, new_data);
437 return err; 437 return err;
@@ -448,7 +448,7 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
448 struct mem_desc *target_mem; 448 struct mem_desc *target_mem;
449 449
450 if (param->target_buf >= cde_ctx->num_bufs) { 450 if (param->target_buf >= cde_ctx->num_bufs) {
451 gk20a_warn(&cde_ctx->pdev->dev, "cde: invalid buffer parameter. param idx = %d, target_buf=%u, num_bufs=%u", 451 gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. param idx = %d, target_buf=%u, num_bufs=%u",
452 cde_ctx->num_params, param->target_buf, 452 cde_ctx->num_params, param->target_buf,
453 cde_ctx->num_bufs); 453 cde_ctx->num_bufs);
454 return -EINVAL; 454 return -EINVAL;
@@ -456,7 +456,7 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
456 456
457 target_mem = cde_ctx->mem + param->target_buf; 457 target_mem = cde_ctx->mem + param->target_buf;
458 if (target_mem->size< (param->target_byte_offset + 3)) { 458 if (target_mem->size< (param->target_byte_offset + 3)) {
459 gk20a_warn(&cde_ctx->pdev->dev, "cde: invalid buffer parameter. param idx = %d, target_buf_offs=%lld, target_buf_size=%zu", 459 gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. param idx = %d, target_buf_offs=%lld, target_buf_size=%zu",
460 cde_ctx->num_params, param->target_byte_offset, 460 cde_ctx->num_params, param->target_byte_offset,
461 target_mem->size); 461 target_mem->size);
462 return -EINVAL; 462 return -EINVAL;
@@ -464,14 +464,14 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
464 464
465 /* does this parameter fit into our parameter structure */ 465 /* does this parameter fit into our parameter structure */
466 if (cde_ctx->num_params >= MAX_CDE_PARAMS) { 466 if (cde_ctx->num_params >= MAX_CDE_PARAMS) {
467 gk20a_warn(&cde_ctx->pdev->dev, "cde: no room for new parameters param idx = %d", 467 gk20a_warn(cde_ctx->dev, "cde: no room for new parameters param idx = %d",
468 cde_ctx->num_params); 468 cde_ctx->num_params);
469 return -ENOMEM; 469 return -ENOMEM;
470 } 470 }
471 471
472 /* is the given id valid? */ 472 /* is the given id valid? */
473 if (param->id >= NUM_RESERVED_PARAMS + MAX_CDE_USER_PARAMS) { 473 if (param->id >= NUM_RESERVED_PARAMS + MAX_CDE_USER_PARAMS) {
474 gk20a_warn(&cde_ctx->pdev->dev, "cde: parameter id is not valid. param idx = %d, id=%u, max=%u", 474 gk20a_warn(cde_ctx->dev, "cde: parameter id is not valid. param idx = %d, id=%u, max=%u",
475 param->id, cde_ctx->num_params, 475 param->id, cde_ctx->num_params,
476 NUM_RESERVED_PARAMS + MAX_CDE_USER_PARAMS); 476 NUM_RESERVED_PARAMS + MAX_CDE_USER_PARAMS);
477 return -EINVAL; 477 return -EINVAL;
@@ -498,7 +498,7 @@ static int gk20a_init_cde_required_class(struct gk20a_cde_ctx *cde_ctx,
498 498
499 err = gk20a_alloc_obj_ctx(cde_ctx->ch, &alloc_obj_ctx); 499 err = gk20a_alloc_obj_ctx(cde_ctx->ch, &alloc_obj_ctx);
500 if (err) { 500 if (err) {
501 gk20a_warn(&cde_ctx->pdev->dev, "cde: failed to allocate ctx. err=%d", 501 gk20a_warn(cde_ctx->dev, "cde: failed to allocate ctx. err=%d",
502 err); 502 err);
503 return err; 503 return err;
504 } 504 }
@@ -524,7 +524,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
524 gpfifo = &cde_ctx->convert_cmd; 524 gpfifo = &cde_ctx->convert_cmd;
525 num_entries = &cde_ctx->convert_cmd_num_entries; 525 num_entries = &cde_ctx->convert_cmd_num_entries;
526 } else { 526 } else {
527 gk20a_warn(&cde_ctx->pdev->dev, "cde: unknown command. op=%u", 527 gk20a_warn(cde_ctx->dev, "cde: unknown command. op=%u",
528 op); 528 op);
529 return -EINVAL; 529 return -EINVAL;
530 } 530 }
@@ -533,7 +533,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
533 *gpfifo = kzalloc(sizeof(struct nvgpu_gpfifo) * num_elems, 533 *gpfifo = kzalloc(sizeof(struct nvgpu_gpfifo) * num_elems,
534 GFP_KERNEL); 534 GFP_KERNEL);
535 if (!*gpfifo) { 535 if (!*gpfifo) {
536 gk20a_warn(&cde_ctx->pdev->dev, "cde: could not allocate memory for gpfifo entries"); 536 gk20a_warn(cde_ctx->dev, "cde: could not allocate memory for gpfifo entries");
537 return -ENOMEM; 537 return -ENOMEM;
538 } 538 }
539 539
@@ -543,7 +543,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
543 543
544 /* validate the current entry */ 544 /* validate the current entry */
545 if (cmd_elem->target_buf >= cde_ctx->num_bufs) { 545 if (cmd_elem->target_buf >= cde_ctx->num_bufs) {
546 gk20a_warn(&cde_ctx->pdev->dev, "cde: target buffer is not available (target=%u, num_bufs=%u)", 546 gk20a_warn(cde_ctx->dev, "cde: target buffer is not available (target=%u, num_bufs=%u)",
547 cmd_elem->target_buf, cde_ctx->num_bufs); 547 cmd_elem->target_buf, cde_ctx->num_bufs);
548 return -EINVAL; 548 return -EINVAL;
549 } 549 }
@@ -551,7 +551,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
551 target_mem = cde_ctx->mem + cmd_elem->target_buf; 551 target_mem = cde_ctx->mem + cmd_elem->target_buf;
552 if (target_mem->size< 552 if (target_mem->size<
553 cmd_elem->target_byte_offset + cmd_elem->num_bytes) { 553 cmd_elem->target_byte_offset + cmd_elem->num_bytes) {
554 gk20a_warn(&cde_ctx->pdev->dev, "cde: target buffer cannot hold all entries (target_size=%zu, target_byte_offset=%lld, num_bytes=%llu)", 554 gk20a_warn(cde_ctx->dev, "cde: target buffer cannot hold all entries (target_size=%zu, target_byte_offset=%lld, num_bytes=%llu)",
555 target_mem->size, 555 target_mem->size,
556 cmd_elem->target_byte_offset, 556 cmd_elem->target_byte_offset,
557 cmd_elem->num_bytes); 557 cmd_elem->num_bytes);
@@ -585,7 +585,7 @@ static int gk20a_cde_pack_cmdbufs(struct gk20a_cde_ctx *cde_ctx)
585 /* allocate buffer that has space for both */ 585 /* allocate buffer that has space for both */
586 combined_cmd = kzalloc(total_bytes, GFP_KERNEL); 586 combined_cmd = kzalloc(total_bytes, GFP_KERNEL);
587 if (!combined_cmd) { 587 if (!combined_cmd) {
588 gk20a_warn(&cde_ctx->pdev->dev, 588 gk20a_warn(cde_ctx->dev,
589 "cde: could not allocate memory for gpfifo entries"); 589 "cde: could not allocate memory for gpfifo entries");
590 return -ENOMEM; 590 return -ENOMEM;
591 } 591 }
@@ -618,7 +618,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
618 618
619 min_size += 2 * sizeof(u32); 619 min_size += 2 * sizeof(u32);
620 if (img->size < min_size) { 620 if (img->size < min_size) {
621 gk20a_warn(&cde_ctx->pdev->dev, "cde: invalid image header"); 621 gk20a_warn(cde_ctx->dev, "cde: invalid image header");
622 return -EINVAL; 622 return -EINVAL;
623 } 623 }
624 624
@@ -627,7 +627,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
627 627
628 min_size += num_of_elems * sizeof(*elem); 628 min_size += num_of_elems * sizeof(*elem);
629 if (img->size < min_size) { 629 if (img->size < min_size) {
630 gk20a_warn(&cde_ctx->pdev->dev, "cde: bad image"); 630 gk20a_warn(cde_ctx->dev, "cde: bad image");
631 return -EINVAL; 631 return -EINVAL;
632 } 632 }
633 633
@@ -664,7 +664,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
664 MAX_CDE_ARRAY_ENTRIES*sizeof(u32)); 664 MAX_CDE_ARRAY_ENTRIES*sizeof(u32));
665 break; 665 break;
666 default: 666 default:
667 gk20a_warn(&cde_ctx->pdev->dev, "cde: unknown header element"); 667 gk20a_warn(cde_ctx->dev, "cde: unknown header element");
668 err = -EINVAL; 668 err = -EINVAL;
669 } 669 }
670 670
@@ -675,13 +675,13 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
675 } 675 }
676 676
677 if (!cde_ctx->init_convert_cmd || !cde_ctx->init_cmd_num_entries) { 677 if (!cde_ctx->init_convert_cmd || !cde_ctx->init_cmd_num_entries) {
678 gk20a_warn(&cde_ctx->pdev->dev, "cde: convert command not defined"); 678 gk20a_warn(cde_ctx->dev, "cde: convert command not defined");
679 err = -EINVAL; 679 err = -EINVAL;
680 goto deinit_image; 680 goto deinit_image;
681 } 681 }
682 682
683 if (!cde_ctx->convert_cmd || !cde_ctx->convert_cmd_num_entries) { 683 if (!cde_ctx->convert_cmd || !cde_ctx->convert_cmd_num_entries) {
684 gk20a_warn(&cde_ctx->pdev->dev, "cde: convert command not defined"); 684 gk20a_warn(cde_ctx->dev, "cde: convert command not defined");
685 err = -EINVAL; 685 err = -EINVAL;
686 goto deinit_image; 686 goto deinit_image;
687 } 687 }
@@ -714,12 +714,12 @@ static int gk20a_cde_execute_buffer(struct gk20a_cde_ctx *cde_ctx,
714 gpfifo = cde_ctx->convert_cmd; 714 gpfifo = cde_ctx->convert_cmd;
715 num_entries = cde_ctx->convert_cmd_num_entries; 715 num_entries = cde_ctx->convert_cmd_num_entries;
716 } else { 716 } else {
717 gk20a_warn(&cde_ctx->pdev->dev, "cde: unknown buffer"); 717 gk20a_warn(cde_ctx->dev, "cde: unknown buffer");
718 return -EINVAL; 718 return -EINVAL;
719 } 719 }
720 720
721 if (gpfifo == NULL || num_entries == 0) { 721 if (gpfifo == NULL || num_entries == 0) {
722 gk20a_warn(&cde_ctx->pdev->dev, "cde: buffer not available"); 722 gk20a_warn(cde_ctx->dev, "cde: buffer not available");
723 return -ENOSYS; 723 return -ENOSYS;
724 } 724 }
725 725
@@ -757,7 +757,7 @@ __releases(&cde_app->mutex)
757 struct gk20a_cde_ctx *cde_ctx = container_of(delay_work, 757 struct gk20a_cde_ctx *cde_ctx = container_of(delay_work,
758 struct gk20a_cde_ctx, ctx_deleter_work); 758 struct gk20a_cde_ctx, ctx_deleter_work);
759 struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app; 759 struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app;
760 struct platform_device *pdev = cde_ctx->pdev; 760 struct device *dev = cde_ctx->dev;
761 int err; 761 int err;
762 762
763 /* someone has just taken it? engine deletion started? */ 763 /* someone has just taken it? engine deletion started? */
@@ -767,11 +767,11 @@ __releases(&cde_app->mutex)
767 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, 767 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
768 "cde: attempting to delete temporary %p", cde_ctx); 768 "cde: attempting to delete temporary %p", cde_ctx);
769 769
770 err = gk20a_busy(pdev); 770 err = gk20a_busy(dev);
771 if (err) { 771 if (err) {
772 /* this context would find new use anyway later, so not freeing 772 /* this context would find new use anyway later, so not freeing
773 * here does not leak anything */ 773 * here does not leak anything */
774 gk20a_warn(&pdev->dev, "cde: cannot set gk20a on, postponing" 774 gk20a_warn(dev, "cde: cannot set gk20a on, postponing"
775 " temp ctx deletion"); 775 " temp ctx deletion");
776 return; 776 return;
777 } 777 }
@@ -795,7 +795,7 @@ __releases(&cde_app->mutex)
795 795
796out: 796out:
797 mutex_unlock(&cde_app->mutex); 797 mutex_unlock(&cde_app->mutex);
798 gk20a_idle(pdev); 798 gk20a_idle(dev);
799} 799}
800 800
801static struct gk20a_cde_ctx *gk20a_cde_do_get_context(struct gk20a *g) 801static struct gk20a_cde_ctx *gk20a_cde_do_get_context(struct gk20a *g)
@@ -839,7 +839,7 @@ __must_hold(&cde_app->mutex)
839 839
840 cde_ctx = gk20a_cde_allocate_context(g); 840 cde_ctx = gk20a_cde_allocate_context(g);
841 if (IS_ERR(cde_ctx)) { 841 if (IS_ERR(cde_ctx)) {
842 gk20a_warn(&g->dev->dev, "cde: cannot allocate context: %ld", 842 gk20a_warn(g->dev, "cde: cannot allocate context: %ld",
843 PTR_ERR(cde_ctx)); 843 PTR_ERR(cde_ctx));
844 return cde_ctx; 844 return cde_ctx;
845 } 845 }
@@ -888,7 +888,7 @@ static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct gk20a *g)
888 return ERR_PTR(-ENOMEM); 888 return ERR_PTR(-ENOMEM);
889 889
890 cde_ctx->g = g; 890 cde_ctx->g = g;
891 cde_ctx->pdev = g->dev; 891 cde_ctx->dev = g->dev;
892 892
893 ret = gk20a_cde_load(cde_ctx); 893 ret = gk20a_cde_load(cde_ctx);
894 if (ret) { 894 if (ret) {
@@ -951,7 +951,7 @@ __releases(&cde_app->mutex)
951 /* First, map the buffer to local va */ 951 /* First, map the buffer to local va */
952 952
953 /* ensure that the compbits buffer has drvdata */ 953 /* ensure that the compbits buffer has drvdata */
954 err = gk20a_dmabuf_alloc_drvdata(compbits_scatter_buf, &g->dev->dev); 954 err = gk20a_dmabuf_alloc_drvdata(compbits_scatter_buf, g->dev);
955 if (err) 955 if (err)
956 goto exit_unlock; 956 goto exit_unlock;
957 957
@@ -1007,7 +1007,7 @@ __releases(&cde_app->mutex)
1007 1007
1008 surface = dma_buf_vmap(compbits_scatter_buf); 1008 surface = dma_buf_vmap(compbits_scatter_buf);
1009 if (IS_ERR(surface)) { 1009 if (IS_ERR(surface)) {
1010 gk20a_warn(&g->dev->dev, 1010 gk20a_warn(g->dev,
1011 "dma_buf_vmap failed"); 1011 "dma_buf_vmap failed");
1012 err = -EINVAL; 1012 err = -EINVAL;
1013 goto exit_unlock; 1013 goto exit_unlock;
@@ -1017,9 +1017,9 @@ __releases(&cde_app->mutex)
1017 1017
1018 gk20a_dbg(gpu_dbg_cde, "surface=0x%p scatterBuffer=0x%p", 1018 gk20a_dbg(gpu_dbg_cde, "surface=0x%p scatterBuffer=0x%p",
1019 surface, scatter_buffer); 1019 surface, scatter_buffer);
1020 sgt = gk20a_mm_pin(&g->dev->dev, compbits_scatter_buf); 1020 sgt = gk20a_mm_pin(g->dev, compbits_scatter_buf);
1021 if (IS_ERR(sgt)) { 1021 if (IS_ERR(sgt)) {
1022 gk20a_warn(&g->dev->dev, 1022 gk20a_warn(g->dev,
1023 "mm_pin failed"); 1023 "mm_pin failed");
1024 err = -EINVAL; 1024 err = -EINVAL;
1025 goto exit_unlock; 1025 goto exit_unlock;
@@ -1029,7 +1029,7 @@ __releases(&cde_app->mutex)
1029 scatterbuffer_size); 1029 scatterbuffer_size);
1030 WARN_ON(err); 1030 WARN_ON(err);
1031 1031
1032 gk20a_mm_unpin(&g->dev->dev, compbits_scatter_buf, 1032 gk20a_mm_unpin(g->dev, compbits_scatter_buf,
1033 sgt); 1033 sgt);
1034 if (err) 1034 if (err)
1035 goto exit_unlock; 1035 goto exit_unlock;
@@ -1041,7 +1041,7 @@ __releases(&cde_app->mutex)
1041 } 1041 }
1042 1042
1043 /* store source buffer compression tags */ 1043 /* store source buffer compression tags */
1044 gk20a_get_comptags(&g->dev->dev, compbits_scatter_buf, &comptags); 1044 gk20a_get_comptags(g->dev, compbits_scatter_buf, &comptags);
1045 cde_ctx->surf_param_offset = comptags.offset; 1045 cde_ctx->surf_param_offset = comptags.offset;
1046 cde_ctx->surf_param_lines = comptags.lines; 1046 cde_ctx->surf_param_lines = comptags.lines;
1047 1047
@@ -1067,7 +1067,7 @@ __releases(&cde_app->mutex)
1067 int id = param->id - NUM_RESERVED_PARAMS; 1067 int id = param->id - NUM_RESERVED_PARAMS;
1068 1068
1069 if (id < 0 || id >= MAX_CDE_USER_PARAMS) { 1069 if (id < 0 || id >= MAX_CDE_USER_PARAMS) {
1070 gk20a_warn(&cde_ctx->pdev->dev, "cde: unknown user parameter"); 1070 gk20a_warn(cde_ctx->dev, "cde: unknown user parameter");
1071 err = -EINVAL; 1071 err = -EINVAL;
1072 goto exit_unlock; 1072 goto exit_unlock;
1073 } 1073 }
@@ -1077,7 +1077,7 @@ __releases(&cde_app->mutex)
1077 /* patch data */ 1077 /* patch data */
1078 err = gk20a_cde_patch_params(cde_ctx); 1078 err = gk20a_cde_patch_params(cde_ctx);
1079 if (err) { 1079 if (err) {
1080 gk20a_warn(&cde_ctx->pdev->dev, "cde: failed to patch parameters"); 1080 gk20a_warn(cde_ctx->dev, "cde: failed to patch parameters");
1081 goto exit_unlock; 1081 goto exit_unlock;
1082 } 1082 }
1083 1083
@@ -1140,19 +1140,19 @@ __releases(&cde_app->mutex)
1140 1140
1141 if (ch->has_timedout) { 1141 if (ch->has_timedout) {
1142 if (cde_ctx->is_temporary) { 1142 if (cde_ctx->is_temporary) {
1143 gk20a_warn(&cde_ctx->pdev->dev, 1143 gk20a_warn(cde_ctx->dev,
1144 "cde: channel had timed out" 1144 "cde: channel had timed out"
1145 " (temporary channel)"); 1145 " (temporary channel)");
1146 /* going to be deleted anyway */ 1146 /* going to be deleted anyway */
1147 } else { 1147 } else {
1148 gk20a_warn(&cde_ctx->pdev->dev, 1148 gk20a_warn(cde_ctx->dev,
1149 "cde: channel had timed out" 1149 "cde: channel had timed out"
1150 ", reloading"); 1150 ", reloading");
1151 /* mark it to be deleted, replace with a new one */ 1151 /* mark it to be deleted, replace with a new one */
1152 mutex_lock(&cde_app->mutex); 1152 mutex_lock(&cde_app->mutex);
1153 cde_ctx->is_temporary = true; 1153 cde_ctx->is_temporary = true;
1154 if (gk20a_cde_create_context(g)) { 1154 if (gk20a_cde_create_context(g)) {
1155 gk20a_err(&cde_ctx->pdev->dev, 1155 gk20a_err(cde_ctx->dev,
1156 "cde: can't replace context"); 1156 "cde: can't replace context");
1157 } 1157 }
1158 mutex_unlock(&cde_app->mutex); 1158 mutex_unlock(&cde_app->mutex);
@@ -1181,14 +1181,14 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
1181 1181
1182 img = gk20a_request_firmware(g, "gpu2cde.bin"); 1182 img = gk20a_request_firmware(g, "gpu2cde.bin");
1183 if (!img) { 1183 if (!img) {
1184 dev_err(&cde_ctx->pdev->dev, "cde: could not fetch the firmware"); 1184 dev_err(cde_ctx->dev, "cde: could not fetch the firmware");
1185 return -ENOSYS; 1185 return -ENOSYS;
1186 } 1186 }
1187 1187
1188 ch = gk20a_open_new_channel_with_cb(g, gk20a_cde_finished_ctx_cb, 1188 ch = gk20a_open_new_channel_with_cb(g, gk20a_cde_finished_ctx_cb,
1189 cde_ctx); 1189 cde_ctx);
1190 if (!ch) { 1190 if (!ch) {
1191 gk20a_warn(&cde_ctx->pdev->dev, "cde: gk20a channel not available"); 1191 gk20a_warn(cde_ctx->dev, "cde: gk20a channel not available");
1192 err = -ENOMEM; 1192 err = -ENOMEM;
1193 goto err_get_gk20a_channel; 1193 goto err_get_gk20a_channel;
1194 } 1194 }
@@ -1198,7 +1198,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
1198 ch->vm = &g->mm.cde.vm; 1198 ch->vm = &g->mm.cde.vm;
1199 err = channel_gk20a_commit_va(ch); 1199 err = channel_gk20a_commit_va(ch);
1200 if (err) { 1200 if (err) {
1201 gk20a_warn(&cde_ctx->pdev->dev, "cde: could not bind vm"); 1201 gk20a_warn(cde_ctx->dev, "cde: could not bind vm");
1202 goto err_commit_va; 1202 goto err_commit_va;
1203 } 1203 }
1204 1204
@@ -1206,7 +1206,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
1206 err = gk20a_alloc_channel_gpfifo(ch, 1206 err = gk20a_alloc_channel_gpfifo(ch,
1207 &(struct nvgpu_alloc_gpfifo_args){1024, 0}); 1207 &(struct nvgpu_alloc_gpfifo_args){1024, 0});
1208 if (err) { 1208 if (err) {
1209 gk20a_warn(&cde_ctx->pdev->dev, "cde: unable to allocate gpfifo"); 1209 gk20a_warn(cde_ctx->dev, "cde: unable to allocate gpfifo");
1210 goto err_alloc_gpfifo; 1210 goto err_alloc_gpfifo;
1211 } 1211 }
1212 1212
@@ -1218,7 +1218,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
1218 false); 1218 false);
1219 1219
1220 if (!vaddr) { 1220 if (!vaddr) {
1221 gk20a_warn(&cde_ctx->pdev->dev, "cde: cannot map compression bit backing store"); 1221 gk20a_warn(cde_ctx->dev, "cde: cannot map compression bit backing store");
1222 err = -ENOMEM; 1222 err = -ENOMEM;
1223 goto err_map_backingstore; 1223 goto err_map_backingstore;
1224 } 1224 }
@@ -1231,7 +1231,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
1231 /* initialise the firmware */ 1231 /* initialise the firmware */
1232 err = gk20a_init_cde_img(cde_ctx, img); 1232 err = gk20a_init_cde_img(cde_ctx, img);
1233 if (err) { 1233 if (err) {
1234 gk20a_warn(&cde_ctx->pdev->dev, "cde: image initialisation failed"); 1234 gk20a_warn(cde_ctx->dev, "cde: image initialisation failed");
1235 goto err_init_cde_img; 1235 goto err_init_cde_img;
1236 } 1236 }
1237 1237
@@ -1248,7 +1248,7 @@ err_alloc_gpfifo:
1248err_commit_va: 1248err_commit_va:
1249err_get_gk20a_channel: 1249err_get_gk20a_channel:
1250 release_firmware(img); 1250 release_firmware(img);
1251 dev_err(&cde_ctx->pdev->dev, "cde: couldn't initialise buffer converter: %d", 1251 dev_err(cde_ctx->dev, "cde: couldn't initialise buffer converter: %d",
1252 err); 1252 err);
1253 return err; 1253 return err;
1254} 1254}
@@ -1386,17 +1386,17 @@ static int gk20a_buffer_convert_gpu_to_cde_v1(
1386 g->ops.cde.get_program_numbers(g, block_height_log2, 1386 g->ops.cde.get_program_numbers(g, block_height_log2,
1387 &hprog, &vprog); 1387 &hprog, &vprog);
1388 else { 1388 else {
1389 gk20a_warn(&g->dev->dev, "cde: chip not supported"); 1389 gk20a_warn(g->dev, "cde: chip not supported");
1390 return -ENOSYS; 1390 return -ENOSYS;
1391 } 1391 }
1392 1392
1393 if (hprog < 0 || vprog < 0) { 1393 if (hprog < 0 || vprog < 0) {
1394 gk20a_warn(&g->dev->dev, "cde: could not determine programs"); 1394 gk20a_warn(g->dev, "cde: could not determine programs");
1395 return -ENOSYS; 1395 return -ENOSYS;
1396 } 1396 }
1397 1397
1398 if (xtiles > 8192 / 8 || ytiles > 8192 / 8) 1398 if (xtiles > 8192 / 8 || ytiles > 8192 / 8)
1399 gk20a_warn(&g->dev->dev, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)", 1399 gk20a_warn(g->dev, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)",
1400 xtiles, ytiles); 1400 xtiles, ytiles);
1401 1401
1402 gk20a_dbg(gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx", 1402 gk20a_dbg(gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx",
@@ -1645,9 +1645,9 @@ static const struct file_operations gk20a_cde_reload_fops = {
1645 .write = gk20a_cde_reload_write, 1645 .write = gk20a_cde_reload_write,
1646}; 1646};
1647 1647
1648void gk20a_cde_debugfs_init(struct platform_device *dev) 1648void gk20a_cde_debugfs_init(struct device *dev)
1649{ 1649{
1650 struct gk20a_platform *platform = platform_get_drvdata(dev); 1650 struct gk20a_platform *platform = dev_get_drvdata(dev);
1651 struct gk20a *g = get_gk20a(dev); 1651 struct gk20a *g = get_gk20a(dev);
1652 1652
1653 debugfs_create_u32("cde_parameter", S_IWUSR | S_IRUGO, 1653 debugfs_create_u32("cde_parameter", S_IWUSR | S_IRUGO,