author	Terje Bergstrom <tbergstrom@nvidia.com>	2018-04-18 22:39:46 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-05-09 21:26:04 -0400
commit	dd739fcb039d51606e9a5454ec0aab17bcb01965 (patch)
tree	806ba8575d146367ad1be00086ca0cdae35a6b28	/drivers/gpu/nvgpu/common/linux/cde.c
parent	7e66f2a63d4855e763fa768047dfc32f6f96b771 (diff)
gpu: nvgpu: Remove gk20a_dbg* functions
Switch all logging to nvgpu_log*(). The gk20a_dbg* macros are intentionally
left in place because they are still used from other repositories. Because
the new functions do not work without a pointer to struct gk20a, and piping
one through just for logging is excessive, some log messages are deleted.

Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
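For illustration, the conversion pattern applied throughout this file is
sketched below as a minimal standalone C program. The struct layouts, the
log-mask value, and the body of the nvgpu_log() stub are simplified
stand-ins, not the real nvgpu definitions; only the call-site shape (derive
a struct gk20a pointer, then pass it as the first argument) mirrors the
actual change in the diff.

	/* Minimal sketch of the gk20a_dbg() -> nvgpu_log() conversion.
	 * Everything here is a simplified stand-in for nvgpu internals. */
	#include <stdio.h>
	#include <stdarg.h>

	#define gpu_dbg_cde_ctx (1U << 0)	/* stand-in debug mask bit */

	struct gk20a { unsigned int log_mask; };
	struct nvgpu_os_linux { struct gk20a g; };
	struct gk20a_cde_ctx { struct nvgpu_os_linux *l; };

	/* Stand-in for nvgpu_log(): unlike the old gk20a_dbg() macro, it
	 * needs the per-GPU struct gk20a to decide whether to print. */
	static void nvgpu_log(struct gk20a *g, unsigned int mask,
			      const char *fmt, ...)
	{
		va_list args;

		if (!(g->log_mask & mask))
			return;
		va_start(args, fmt);
		vfprintf(stderr, fmt, args);
		va_end(args);
		fputc('\n', stderr);
	}

	int main(void)
	{
		struct nvgpu_os_linux l = { .g = { .log_mask = gpu_dbg_cde_ctx } };
		struct gk20a_cde_ctx cde_ctx = { .l = &l };

		/* Old: gk20a_dbg(gpu_dbg_cde_ctx, "releasing use on %p", ...);
		 * New: derive the struct gk20a pointer first, then log. */
		struct gk20a *g = &cde_ctx.l->g;

		nvgpu_log(g, gpu_dbg_cde_ctx, "releasing use on %p",
			  (void *)&cde_ctx);
		return 0;
	}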
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/cde.c')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/cde.c	54
1 file changed, 28 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/cde.c b/drivers/gpu/nvgpu/common/linux/cde.c
index 7c92246c..511d564f 100644
--- a/drivers/gpu/nvgpu/common/linux/cde.c
+++ b/drivers/gpu/nvgpu/common/linux/cde.c
@@ -464,7 +464,7 @@ static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx)
 		new_data = cde_ctx->user_param_values[user_id];
 	}
 
-	gk20a_dbg(gpu_dbg_cde, "cde: patch: idx_in_file=%d param_id=%d target_buf=%u target_byte_offset=%lld data_value=0x%llx data_offset/data_diff=%lld data_type=%d data_shift=%d data_mask=0x%llx",
+	nvgpu_log(g, gpu_dbg_cde, "cde: patch: idx_in_file=%d param_id=%d target_buf=%u target_byte_offset=%lld data_value=0x%llx data_offset/data_diff=%lld data_type=%d data_shift=%d data_mask=0x%llx",
 		  i, param->id, param->target_buf,
 		  param->target_byte_offset, new_data,
 		  param->data_offset, param->type, param->shift,
@@ -790,8 +790,9 @@ __acquires(&cde_app->mutex)
 __releases(&cde_app->mutex)
 {
 	struct gk20a_cde_app *cde_app = &cde_ctx->l->cde_app;
+	struct gk20a *g = &cde_ctx->l->g;
 
-	gk20a_dbg(gpu_dbg_cde_ctx, "releasing use on %p", cde_ctx);
+	nvgpu_log(g, gpu_dbg_cde_ctx, "releasing use on %p", cde_ctx);
 	trace_gk20a_cde_release(cde_ctx);
 
 	nvgpu_mutex_acquire(&cde_app->mutex);
@@ -801,7 +802,7 @@ __releases(&cde_app->mutex)
 		nvgpu_list_move(&cde_ctx->list, &cde_app->free_contexts);
 		cde_app->ctx_usecount--;
 	} else {
-		gk20a_dbg_info("double release cde context %p", cde_ctx);
+		nvgpu_log_info(g, "double release cde context %p", cde_ctx);
 	}
 
 	nvgpu_mutex_release(&cde_app->mutex);
@@ -823,7 +824,7 @@ __releases(&cde_app->mutex)
 	if (cde_ctx->in_use || !cde_app->initialised)
 		return;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
 			"cde: attempting to delete temporary %p", cde_ctx);
 
 	err = gk20a_busy(g);
@@ -837,7 +838,7 @@ __releases(&cde_app->mutex)
 
 	nvgpu_mutex_acquire(&cde_app->mutex);
 	if (cde_ctx->in_use || !cde_app->initialised) {
-		gk20a_dbg(gpu_dbg_cde_ctx,
+		nvgpu_log(g, gpu_dbg_cde_ctx,
 				"cde: context use raced, not deleting %p",
 				cde_ctx);
 		goto out;
@@ -847,7 +848,7 @@ __releases(&cde_app->mutex)
 		 "double pending %p", cde_ctx);
 
 	gk20a_cde_remove_ctx(cde_ctx);
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
 			"cde: destroyed %p count=%d use=%d max=%d",
 			cde_ctx, cde_app->ctx_count, cde_app->ctx_usecount,
 			cde_app->ctx_count_top);
@@ -874,7 +875,7 @@ __must_hold(&cde_app->mutex)
 	if (!nvgpu_list_empty(&cde_app->free_contexts)) {
 		cde_ctx = nvgpu_list_first_entry(&cde_app->free_contexts,
 				gk20a_cde_ctx, list);
-		gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
 				"cde: got free %p count=%d use=%d max=%d",
 				cde_ctx, cde_app->ctx_count,
 				cde_app->ctx_usecount,
@@ -893,7 +894,7 @@ __must_hold(&cde_app->mutex)
 
 	/* no free contexts, get a temporary one */
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx,
 			"cde: no free contexts, count=%d",
 			cde_app->ctx_count);
 
@@ -967,7 +968,7 @@ static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct nvgpu_os_linux *l
 	INIT_DELAYED_WORK(&cde_ctx->ctx_deleter_work,
 			gk20a_cde_ctx_deleter_fn);
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: allocated %p", cde_ctx);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: allocated %p", cde_ctx);
 	trace_gk20a_cde_allocate_context(cde_ctx);
 	return cde_ctx;
 }
@@ -1005,7 +1006,7 @@ __releases(&l->cde_app->mutex)
 	u32 submit_op;
 	struct dma_buf_attachment *attachment;
 
-	gk20a_dbg(gpu_dbg_cde, "compbits_byte_offset=%llu scatterbuffer_byte_offset=%llu",
+	nvgpu_log(g, gpu_dbg_cde, "compbits_byte_offset=%llu scatterbuffer_byte_offset=%llu",
 		  compbits_byte_offset, scatterbuffer_byte_offset);
 
 	/* scatter buffer must be after compbits buffer */
@@ -1055,11 +1056,11 @@ __releases(&l->cde_app->mutex)
 			compbits_byte_offset;
 	}
 
-	gk20a_dbg(gpu_dbg_cde, "map_offset=%llu map_size=%llu",
+	nvgpu_log(g, gpu_dbg_cde, "map_offset=%llu map_size=%llu",
 		  map_offset, map_size);
-	gk20a_dbg(gpu_dbg_cde, "mapped_compbits_offset=%llu compbits_size=%llu",
+	nvgpu_log(g, gpu_dbg_cde, "mapped_compbits_offset=%llu compbits_size=%llu",
 		  mapped_compbits_offset, compbits_size);
-	gk20a_dbg(gpu_dbg_cde, "mapped_scatterbuffer_offset=%llu scatterbuffer_size=%llu",
+	nvgpu_log(g, gpu_dbg_cde, "mapped_scatterbuffer_offset=%llu scatterbuffer_size=%llu",
 		  mapped_scatterbuffer_offset, scatterbuffer_size);
 
 
@@ -1096,7 +1097,7 @@ __releases(&l->cde_app->mutex)
 
 	scatter_buffer = surface + scatterbuffer_byte_offset;
 
-	gk20a_dbg(gpu_dbg_cde, "surface=0x%p scatterBuffer=0x%p",
+	nvgpu_log(g, gpu_dbg_cde, "surface=0x%p scatterBuffer=0x%p",
 		  surface, scatter_buffer);
 	sgt = gk20a_mm_pin(dev_from_gk20a(g), compbits_scatter_buf,
 			   &attachment);
@@ -1163,11 +1164,11 @@ __releases(&l->cde_app->mutex)
 		goto exit_unmap_surface;
 	}
 
-	gk20a_dbg(gpu_dbg_cde, "cde: buffer=cbc, size=%zu, gpuva=%llx\n",
+	nvgpu_log(g, gpu_dbg_cde, "cde: buffer=cbc, size=%zu, gpuva=%llx\n",
 		 g->gr.compbit_store.mem.size, cde_ctx->backing_store_vaddr);
-	gk20a_dbg(gpu_dbg_cde, "cde: buffer=compbits, size=%llu, gpuva=%llx\n",
+	nvgpu_log(g, gpu_dbg_cde, "cde: buffer=compbits, size=%llu, gpuva=%llx\n",
 		 cde_ctx->compbit_size, cde_ctx->compbit_vaddr);
-	gk20a_dbg(gpu_dbg_cde, "cde: buffer=scatterbuffer, size=%llu, gpuva=%llx\n",
+	nvgpu_log(g, gpu_dbg_cde, "cde: buffer=scatterbuffer, size=%llu, gpuva=%llx\n",
 		 cde_ctx->scatterbuffer_size, cde_ctx->scatterbuffer_vaddr);
 
 	/* take always the postfence as it is needed for protecting the
@@ -1234,9 +1235,9 @@ __releases(&cde_app->mutex)
 		return;
 
 	trace_gk20a_cde_finished_ctx_cb(cde_ctx);
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: finished %p", cde_ctx);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: finished %p", cde_ctx);
 	if (!cde_ctx->in_use)
-		gk20a_dbg_info("double finish cde context %p on channel %p",
+		nvgpu_log_info(g, "double finish cde context %p on channel %p",
 				cde_ctx, ch);
 
 	if (ch->has_timedout) {
@@ -1406,12 +1407,13 @@ __acquires(&cde_app->mutex)
 __releases(&cde_app->mutex)
 {
 	struct gk20a_cde_app *cde_app = &l->cde_app;
+	struct gk20a *g = &l->g;
 	int err;
 
 	if (cde_app->initialised)
 		return 0;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: init");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: init");
 
 	err = nvgpu_mutex_init(&cde_app->mutex);
 	if (err)
@@ -1430,7 +1432,7 @@ __releases(&cde_app->mutex)
 	cde_app->initialised = true;
 
 	nvgpu_mutex_release(&cde_app->mutex);
-	gk20a_dbg(gpu_dbg_cde_ctx, "cde: init finished: %d", err);
+	nvgpu_log(g, gpu_dbg_cde_ctx, "cde: init finished: %d", err);
 
 	if (err)
 		nvgpu_mutex_destroy(&cde_app->mutex);
@@ -1528,14 +1530,14 @@ static int gk20a_buffer_convert_gpu_to_cde_v1(
 		nvgpu_warn(g, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)",
 			   xtiles, ytiles);
 
-	gk20a_dbg(gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx",
+	nvgpu_log(g, gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx",
 		  width, height, block_height_log2,
 		  compbits_hoffset, compbits_voffset, scatterbuffer_offset);
-	gk20a_dbg(gpu_dbg_cde, "resolution (%d, %d) tiles (%d, %d)",
+	nvgpu_log(g, gpu_dbg_cde, "resolution (%d, %d) tiles (%d, %d)",
 		  width, height, xtiles, ytiles);
-	gk20a_dbg(gpu_dbg_cde, "group (%d, %d) gridH (%d, %d) gridV (%d, %d)",
+	nvgpu_log(g, gpu_dbg_cde, "group (%d, %d) gridH (%d, %d) gridV (%d, %d)",
 		  wgx, wgy, gridw_h, gridh_h, gridw_v, gridh_v);
-	gk20a_dbg(gpu_dbg_cde, "hprog=%d, offset=0x%x, regs=%d, vprog=%d, offset=0x%x, regs=%d",
+	nvgpu_log(g, gpu_dbg_cde, "hprog=%d, offset=0x%x, regs=%d, vprog=%d, offset=0x%x, regs=%d",
 		  hprog,
 		  l->cde_app.arrays[ARRAY_PROGRAM_OFFSET][hprog],
 		  l->cde_app.arrays[ARRAY_REGISTER_COUNT][hprog],
@@ -1634,7 +1636,7 @@ static int gk20a_buffer_convert_gpu_to_cde(
 	if (!l->cde_app.initialised)
 		return -ENOSYS;
 
-	gk20a_dbg(gpu_dbg_cde, "firmware version = %d\n",
+	nvgpu_log(g, gpu_dbg_cde, "firmware version = %d\n",
 		  l->cde_app.firmware_version);
 
 	if (l->cde_app.firmware_version == 1) {