From 3ba374a5d94f8c2067731155afaf79f03e6c390c Mon Sep 17 00:00:00 2001 From: Terje Bergstrom Date: Thu, 30 Mar 2017 07:44:03 -0700 Subject: gpu: nvgpu: gk20a: Use new error macro gk20a_err() and gk20a_warn() require a struct device pointer, which is not portable across operating systems. The new nvgpu_err() and nvgpu_warn() macros take struct gk20a pointer. Convert code to use the more portable macros. JIRA NVGPU-16 Change-Id: Ia51f36d94c5ce57a5a0ab83b3c83a6bce09e2d5c Signed-off-by: Terje Bergstrom Reviewed-on: http://git-master/r/1331694 Reviewed-by: svccoveritychecker Reviewed-by: Alex Waterman GVS: Gerrit_Virtual_Submit --- drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c | 14 +- drivers/gpu/nvgpu/gk20a/bus_gk20a.c | 3 +- drivers/gpu/nvgpu/gk20a/cde_gk20a.c | 109 ++++----- drivers/gpu/nvgpu/gk20a/ce2_gk20a.c | 16 +- drivers/gpu/nvgpu/gk20a/channel_gk20a.c | 49 ++-- drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c | 28 +-- drivers/gpu/nvgpu/gk20a/clk_gk20a.c | 13 +- drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c | 14 +- drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c | 4 +- drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c | 84 +++---- drivers/gpu/nvgpu/gk20a/debug_gk20a.c | 5 +- drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c | 14 +- drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 100 ++++---- drivers/gpu/nvgpu/gk20a/gk20a.c | 44 ++-- drivers/gpu/nvgpu/gk20a/gk20a_scale.c | 6 +- drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c | 8 +- drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c | 5 +- drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 329 +++++++++++++-------------- drivers/gpu/nvgpu/gk20a/hal.c | 4 +- drivers/gpu/nvgpu/gk20a/hal_gk20a.c | 4 +- drivers/gpu/nvgpu/gk20a/ltc_gk20a.c | 8 +- drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 119 +++++----- drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 208 ++++++++--------- drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c | 5 +- drivers/gpu/nvgpu/gk20a/regops_gk20a.c | 12 +- drivers/gpu/nvgpu/gk20a/sched_gk20a.c | 7 +- drivers/gpu/nvgpu/gk20a/sim_gk20a.c | 26 ++- drivers/gpu/nvgpu/gk20a/tsg_gk20a.c | 6 +- 28 files changed, 608 insertions(+), 636 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c index cda9fb8d..ce9a4176 100644 --- a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c +++ b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c @@ -28,6 +28,8 @@ #include "gk20a/gk20a.h" #include "gk20a/fence_gk20a.h" +#include + #define HZ_TO_MHZ(a) ((a > 0xF414F9CD7) ? 0xffff : (a >> 32) ? 
\ (u32) ((a * 0x10C8ULL) >> 32) : (u16) ((u32) a/MHZ)) #define MHZ_TO_HZ(a) ((u64)a * MHZ) @@ -352,7 +354,7 @@ static int nvgpu_gpu_ioctl_set_mmu_debug_mode( struct nvgpu_gpu_mmu_debug_mode_args *args) { if (gk20a_busy(g)) { - gk20a_err(dev_from_gk20a(g), "failed to power on gpu\n"); + nvgpu_err(g, "failed to power on gpu\n"); return -EINVAL; } @@ -521,7 +523,7 @@ static inline int get_timestamps_zipper(struct gk20a *g, unsigned int i = 0; if (gk20a_busy(g)) { - gk20a_err(dev_from_gk20a(g), "GPU not powered on\n"); + nvgpu_err(g, "GPU not powered on\n"); err = -EINVAL; goto end; } @@ -560,7 +562,7 @@ static int nvgpu_gpu_get_cpu_time_correlation_info( get_cpu_timestamp = get_cpu_timestamp_timeofday; break; default: - gk20a_err(dev_from_gk20a(g), "invalid cpu clock source id\n"); + nvgpu_err(g, "invalid cpu clock source id\n"); return -EINVAL; } @@ -625,7 +627,7 @@ static int nvgpu_gpu_get_engine_info( break; default: - gk20a_err(dev_from_gk20a(g), "Unmapped engine enum %u\n", + nvgpu_err(g, "Unmapped engine enum %u\n", engine_enum); continue; } @@ -677,7 +679,7 @@ static int nvgpu_gpu_alloc_vidmem(struct gk20a *g, if (align > roundup_pow_of_two(args->in.size)) { /* log this special case, buddy allocator detail */ - gk20a_warn(dev_from_gk20a(g), + nvgpu_warn(g, "alignment larger than buffer size rounded up to power of 2 is not supported"); return -EINVAL; } @@ -1510,7 +1512,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg break; default: - dev_dbg(dev_from_gk20a(g), "unrecognized gpu ioctl cmd: 0x%x", cmd); + gk20a_dbg_info("unrecognized gpu ioctl cmd: 0x%x", cmd); err = -ENOTTY; break; } diff --git a/drivers/gpu/nvgpu/gk20a/bus_gk20a.c b/drivers/gpu/nvgpu/gk20a/bus_gk20a.c index dd96df16..3119e373 100644 --- a/drivers/gpu/nvgpu/gk20a/bus_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/bus_gk20a.c @@ -17,6 +17,7 @@ #include #include +#include #include "gk20a.h" @@ -126,7 +127,7 @@ int gk20a_read_ptimer(struct gk20a *g, u64 *value) } /* too many iterations, bail out */ - gk20a_err(dev_from_gk20a(g), "failed to read ptimer"); + nvgpu_err(g, "failed to read ptimer"); return -EBUSY; } diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c index 16baaa39..296a8af0 100644 --- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "gk20a.h" #include "channel_gk20a.h" @@ -228,19 +229,20 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx, struct gk20a_cde_hdr_buf *buf) { struct nvgpu_mem *mem; + struct gk20a *g = cde_ctx->g; int err; /* check that the file can hold the buf */ if (buf->data_byte_offset != 0 && buf->data_byte_offset + buf->num_bytes > img->size) { - gk20a_warn(cde_ctx->dev, "cde: invalid data section. buffer idx = %d", + nvgpu_warn(g, "cde: invalid data section. buffer idx = %d", cde_ctx->num_bufs); return -EINVAL; } /* check that we have enough buf elems available */ if (cde_ctx->num_bufs >= MAX_CDE_BUFS) { - gk20a_warn(cde_ctx->dev, "cde: invalid data section. buffer idx = %d", + nvgpu_warn(g, "cde: invalid data section. buffer idx = %d", cde_ctx->num_bufs); return -ENOMEM; } @@ -249,7 +251,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx, mem = cde_ctx->mem + cde_ctx->num_bufs; err = nvgpu_dma_alloc_map_sys(cde_ctx->vm, buf->num_bytes, mem); if (err) { - gk20a_warn(cde_ctx->dev, "cde: could not allocate device memory. buffer idx = %d", + nvgpu_warn(g, "cde: could not allocate device memory. 
buffer idx = %d", cde_ctx->num_bufs); return -ENOMEM; } @@ -267,6 +269,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx, static int gk20a_replace_data(struct gk20a_cde_ctx *cde_ctx, void *target, int type, s32 shift, u64 mask, u64 value) { + struct gk20a *g = cde_ctx->g; u32 *target_mem_ptr = target; u64 *target_mem_ptr_u64 = target; u64 current_value, new_value; @@ -287,7 +290,7 @@ static int gk20a_replace_data(struct gk20a_cde_ctx *cde_ctx, void *target, current_value = (u64)(current_value >> 32) | (u64)(current_value << 32); } else { - gk20a_warn(cde_ctx->dev, "cde: unknown type. type=%d", + nvgpu_warn(g, "cde: unknown type. type=%d", type); return -EINVAL; } @@ -315,13 +318,14 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx, { struct nvgpu_mem *source_mem; struct nvgpu_mem *target_mem; + struct gk20a *g = cde_ctx->g; u32 *target_mem_ptr; u64 vaddr; int err; if (replace->target_buf >= cde_ctx->num_bufs || replace->source_buf >= cde_ctx->num_bufs) { - gk20a_warn(cde_ctx->dev, "cde: invalid buffer. target_buf=%u, source_buf=%u, num_bufs=%d", + nvgpu_warn(g, "cde: invalid buffer. target_buf=%u, source_buf=%u, num_bufs=%d", replace->target_buf, replace->source_buf, cde_ctx->num_bufs); return -EINVAL; @@ -333,7 +337,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx, if (source_mem->size < (replace->source_byte_offset + 3) || target_mem->size < (replace->target_byte_offset + 3)) { - gk20a_warn(cde_ctx->dev, "cde: invalid buffer offsets. target_buf_offs=%lld, source_buf_offs=%lld, source_buf_size=%zu, dest_buf_size=%zu", + nvgpu_warn(g, "cde: invalid buffer offsets. target_buf_offs=%lld, source_buf_offs=%lld, source_buf_size=%zu, dest_buf_size=%zu", replace->target_byte_offset, replace->source_byte_offset, source_mem->size, @@ -350,7 +354,7 @@ static int gk20a_init_cde_replace(struct gk20a_cde_ctx *cde_ctx, replace->shift, replace->mask, vaddr); if (err) { - gk20a_warn(cde_ctx->dev, "cde: replace failed. err=%d, target_buf=%u, target_buf_offs=%lld, source_buf=%u, source_buf_offs=%lld", + nvgpu_warn(g, "cde: replace failed. err=%d, target_buf=%u, target_buf_offs=%lld, source_buf=%u, source_buf_offs=%lld", err, replace->target_buf, replace->target_byte_offset, replace->source_buf, @@ -438,7 +442,7 @@ static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx) param->shift, param->mask, new_data); if (err) { - gk20a_warn(cde_ctx->dev, "cde: patch failed. err=%d, idx=%d, id=%d, target_buf=%u, target_buf_offs=%lld, patch_value=%llu", + nvgpu_warn(g, "cde: patch failed. err=%d, idx=%d, id=%d, target_buf=%u, target_buf_offs=%lld, patch_value=%llu", err, i, param->id, param->target_buf, param->target_byte_offset, new_data); return err; @@ -453,9 +457,10 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx, struct gk20a_cde_hdr_param *param) { struct nvgpu_mem *target_mem; + struct gk20a *g = cde_ctx->g; if (param->target_buf >= cde_ctx->num_bufs) { - gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. param idx = %d, target_buf=%u, num_bufs=%u", + nvgpu_warn(g, "cde: invalid buffer parameter. param idx = %d, target_buf=%u, num_bufs=%u", cde_ctx->num_params, param->target_buf, cde_ctx->num_bufs); return -EINVAL; @@ -463,7 +468,7 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx, target_mem = cde_ctx->mem + param->target_buf; if (target_mem->size < (param->target_byte_offset + 3)) { - gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. 
param idx = %d, target_buf_offs=%lld, target_buf_size=%zu", + nvgpu_warn(g, "cde: invalid buffer parameter. param idx = %d, target_buf_offs=%lld, target_buf_size=%zu", cde_ctx->num_params, param->target_byte_offset, target_mem->size); return -EINVAL; @@ -471,14 +476,14 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx, /* does this parameter fit into our parameter structure */ if (cde_ctx->num_params >= MAX_CDE_PARAMS) { - gk20a_warn(cde_ctx->dev, "cde: no room for new parameters param idx = %d", + nvgpu_warn(g, "cde: no room for new parameters param idx = %d", cde_ctx->num_params); return -ENOMEM; } /* is the given id valid? */ if (param->id >= NUM_RESERVED_PARAMS + MAX_CDE_USER_PARAMS) { - gk20a_warn(cde_ctx->dev, "cde: parameter id is not valid. param idx = %d, id=%u, max=%u", + nvgpu_warn(g, "cde: parameter id is not valid. param idx = %d, id=%u, max=%u", param->id, cde_ctx->num_params, NUM_RESERVED_PARAMS + MAX_CDE_USER_PARAMS); return -EINVAL; @@ -494,6 +499,7 @@ static int gk20a_init_cde_required_class(struct gk20a_cde_ctx *cde_ctx, const struct firmware *img, u32 required_class) { + struct gk20a *g = cde_ctx->g; struct nvgpu_alloc_obj_ctx_args alloc_obj_ctx; int err; @@ -505,7 +511,7 @@ static int gk20a_init_cde_required_class(struct gk20a_cde_ctx *cde_ctx, err = gk20a_alloc_obj_ctx(cde_ctx->ch, &alloc_obj_ctx); if (err) { - gk20a_warn(cde_ctx->dev, "cde: failed to allocate ctx. err=%d", + nvgpu_warn(g, "cde: failed to allocate ctx. err=%d", err); return err; } @@ -519,6 +525,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx, struct gk20a_cde_cmd_elem *cmd_elem, u32 num_elems) { + struct gk20a *g = cde_ctx->g; struct nvgpu_gpfifo **gpfifo, *gpfifo_elem; u32 *num_entries; unsigned int i; @@ -531,7 +538,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx, gpfifo = &cde_ctx->convert_cmd; num_entries = &cde_ctx->convert_cmd_num_entries; } else { - gk20a_warn(cde_ctx->dev, "cde: unknown command. op=%u", + nvgpu_warn(g, "cde: unknown command. 
op=%u", op); return -EINVAL; } @@ -540,7 +547,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx, *gpfifo = nvgpu_kzalloc(cde_ctx->g, sizeof(struct nvgpu_gpfifo) * num_elems); if (!*gpfifo) { - gk20a_warn(cde_ctx->dev, "cde: could not allocate memory for gpfifo entries"); + nvgpu_warn(g, "cde: could not allocate memory for gpfifo entries"); return -ENOMEM; } @@ -550,7 +557,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx, /* validate the current entry */ if (cmd_elem->target_buf >= cde_ctx->num_bufs) { - gk20a_warn(cde_ctx->dev, "cde: target buffer is not available (target=%u, num_bufs=%u)", + nvgpu_warn(g, "cde: target buffer is not available (target=%u, num_bufs=%u)", cmd_elem->target_buf, cde_ctx->num_bufs); return -EINVAL; } @@ -558,7 +565,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx, target_mem = cde_ctx->mem + cmd_elem->target_buf; if (target_mem->size< cmd_elem->target_byte_offset + cmd_elem->num_bytes) { - gk20a_warn(cde_ctx->dev, "cde: target buffer cannot hold all entries (target_size=%zu, target_byte_offset=%lld, num_bytes=%llu)", + nvgpu_warn(g, "cde: target buffer cannot hold all entries (target_size=%zu, target_byte_offset=%lld, num_bytes=%llu)", target_mem->size, cmd_elem->target_byte_offset, cmd_elem->num_bytes); @@ -582,6 +589,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx, static int gk20a_cde_pack_cmdbufs(struct gk20a_cde_ctx *cde_ctx) { + struct gk20a *g = cde_ctx->g; unsigned long init_bytes = cde_ctx->init_cmd_num_entries * sizeof(struct nvgpu_gpfifo); unsigned long conv_bytes = cde_ctx->convert_cmd_num_entries * @@ -592,8 +600,8 @@ static int gk20a_cde_pack_cmdbufs(struct gk20a_cde_ctx *cde_ctx) /* allocate buffer that has space for both */ combined_cmd = nvgpu_kzalloc(cde_ctx->g, total_bytes); if (!combined_cmd) { - gk20a_warn(cde_ctx->dev, - "cde: could not allocate memory for gpfifo entries"); + nvgpu_warn(g, + "cde: could not allocate memory for gpfifo entries"); return -ENOMEM; } @@ -615,6 +623,7 @@ static int gk20a_cde_pack_cmdbufs(struct gk20a_cde_ctx *cde_ctx) static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx, const struct firmware *img) { + struct gk20a *g = cde_ctx->g; struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app; u32 *data = (u32 *)img->data; u32 num_of_elems; @@ -625,7 +634,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx, min_size += 2 * sizeof(u32); if (img->size < min_size) { - gk20a_warn(cde_ctx->dev, "cde: invalid image header"); + nvgpu_warn(g, "cde: invalid image header"); return -EINVAL; } @@ -634,7 +643,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx, min_size += num_of_elems * sizeof(*elem); if (img->size < min_size) { - gk20a_warn(cde_ctx->dev, "cde: bad image"); + nvgpu_warn(g, "cde: bad image"); return -EINVAL; } @@ -671,7 +680,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx, MAX_CDE_ARRAY_ENTRIES*sizeof(u32)); break; default: - gk20a_warn(cde_ctx->dev, "cde: unknown header element"); + nvgpu_warn(g, "cde: unknown header element"); err = -EINVAL; } @@ -682,13 +691,13 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx, } if (!cde_ctx->init_convert_cmd || !cde_ctx->init_cmd_num_entries) { - gk20a_warn(cde_ctx->dev, "cde: convert command not defined"); + nvgpu_warn(g, "cde: convert command not defined"); err = -EINVAL; goto deinit_image; } if (!cde_ctx->convert_cmd || !cde_ctx->convert_cmd_num_entries) { - gk20a_warn(cde_ctx->dev, "cde: convert command not defined"); + nvgpu_warn(g, 
"cde: convert command not defined"); err = -EINVAL; goto deinit_image; } @@ -708,6 +717,7 @@ static int gk20a_cde_execute_buffer(struct gk20a_cde_ctx *cde_ctx, u32 op, struct nvgpu_fence *fence, u32 flags, struct gk20a_fence **fence_out) { + struct gk20a *g = cde_ctx->g; struct nvgpu_gpfifo *gpfifo = NULL; int num_entries = 0; @@ -721,12 +731,12 @@ static int gk20a_cde_execute_buffer(struct gk20a_cde_ctx *cde_ctx, gpfifo = cde_ctx->convert_cmd; num_entries = cde_ctx->convert_cmd_num_entries; } else { - gk20a_warn(cde_ctx->dev, "cde: unknown buffer"); + nvgpu_warn(g, "cde: unknown buffer"); return -EINVAL; } if (gpfifo == NULL || num_entries == 0) { - gk20a_warn(cde_ctx->dev, "cde: buffer not available"); + nvgpu_warn(g, "cde: buffer not available"); return -ENOSYS; } @@ -765,7 +775,6 @@ __releases(&cde_app->mutex) struct gk20a_cde_ctx *cde_ctx = container_of(delay_work, struct gk20a_cde_ctx, ctx_deleter_work); struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app; - struct device *dev = cde_ctx->dev; struct gk20a *g = cde_ctx->g; int err; @@ -780,7 +789,7 @@ __releases(&cde_app->mutex) if (err) { /* this context would find new use anyway later, so not freeing * here does not leak anything */ - gk20a_warn(dev, "cde: cannot set gk20a on, postponing" + nvgpu_warn(g, "cde: cannot set gk20a on, postponing" " temp ctx deletion"); return; } @@ -848,7 +857,7 @@ __must_hold(&cde_app->mutex) cde_ctx = gk20a_cde_allocate_context(g); if (IS_ERR(cde_ctx)) { - gk20a_warn(g->dev, "cde: cannot allocate context: %ld", + nvgpu_warn(g, "cde: cannot allocate context: %ld", PTR_ERR(cde_ctx)); return cde_ctx; } @@ -1023,7 +1032,7 @@ __releases(&cde_app->mutex) surface = dma_buf_vmap(compbits_scatter_buf); if (IS_ERR(surface)) { - gk20a_warn(g->dev, + nvgpu_warn(g, "dma_buf_vmap failed"); err = -EINVAL; goto exit_unmap_vaddr; @@ -1035,7 +1044,7 @@ __releases(&cde_app->mutex) surface, scatter_buffer); sgt = gk20a_mm_pin(g->dev, compbits_scatter_buf); if (IS_ERR(sgt)) { - gk20a_warn(g->dev, + nvgpu_warn(g, "mm_pin failed"); err = -EINVAL; goto exit_unmap_surface; @@ -1083,7 +1092,7 @@ __releases(&cde_app->mutex) int id = param->id - NUM_RESERVED_PARAMS; if (id < 0 || id >= MAX_CDE_USER_PARAMS) { - gk20a_warn(cde_ctx->dev, "cde: unknown user parameter"); + nvgpu_warn(g, "cde: unknown user parameter"); err = -EINVAL; goto exit_unmap_surface; } @@ -1093,7 +1102,7 @@ __releases(&cde_app->mutex) /* patch data */ err = gk20a_cde_patch_params(cde_ctx); if (err) { - gk20a_warn(cde_ctx->dev, "cde: failed to patch parameters"); + nvgpu_warn(g, "cde: failed to patch parameters"); goto exit_unmap_surface; } @@ -1160,20 +1169,19 @@ __releases(&cde_app->mutex) if (ch->has_timedout) { if (cde_ctx->is_temporary) { - gk20a_warn(cde_ctx->dev, + nvgpu_warn(g, "cde: channel had timed out" " (temporary channel)"); /* going to be deleted anyway */ } else { - gk20a_warn(cde_ctx->dev, + nvgpu_warn(g, "cde: channel had timed out" ", reloading"); /* mark it to be deleted, replace with a new one */ nvgpu_mutex_acquire(&cde_app->mutex); cde_ctx->is_temporary = true; if (gk20a_cde_create_context(g)) { - gk20a_err(cde_ctx->dev, - "cde: can't replace context"); + nvgpu_err(g, "cde: can't replace context"); } nvgpu_mutex_release(&cde_app->mutex); } @@ -1201,7 +1209,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx) img = nvgpu_request_firmware(g, "gpu2cde.bin", 0); if (!img) { - dev_err(cde_ctx->dev, "cde: could not fetch the firmware"); + nvgpu_err(g, "cde: could not fetch the firmware"); return -ENOSYS; } @@ -1210,7 +1218,7 @@ static 
int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx) -1, false); if (!ch) { - gk20a_warn(cde_ctx->dev, "cde: gk20a channel not available"); + nvgpu_warn(g, "cde: gk20a channel not available"); err = -ENOMEM; goto err_get_gk20a_channel; } @@ -1218,14 +1226,14 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx) /* bind the channel to the vm */ err = __gk20a_vm_bind_channel(&g->mm.cde.vm, ch); if (err) { - gk20a_warn(cde_ctx->dev, "cde: could not bind vm"); + nvgpu_warn(g, "cde: could not bind vm"); goto err_commit_va; } /* allocate gpfifo (1024 should be more than enough) */ err = gk20a_channel_alloc_gpfifo(ch, 1024, 0, 0); if (err) { - gk20a_warn(cde_ctx->dev, "cde: unable to allocate gpfifo"); + nvgpu_warn(g, "cde: unable to allocate gpfifo"); goto err_alloc_gpfifo; } @@ -1238,7 +1246,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx) gr->compbit_store.mem.aperture); if (!vaddr) { - gk20a_warn(cde_ctx->dev, "cde: cannot map compression bit backing store"); + nvgpu_warn(g, "cde: cannot map compression bit backing store"); err = -ENOMEM; goto err_map_backingstore; } @@ -1251,7 +1259,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx) /* initialise the firmware */ err = gk20a_init_cde_img(cde_ctx, img); if (err) { - gk20a_warn(cde_ctx->dev, "cde: image initialisation failed"); + nvgpu_warn(g, "cde: image initialisation failed"); goto err_init_cde_img; } @@ -1268,8 +1276,7 @@ err_alloc_gpfifo: err_commit_va: err_get_gk20a_channel: release_firmware(img); - dev_err(cde_ctx->dev, "cde: couldn't initialise buffer converter: %d", - err); + nvgpu_err(g, "cde: couldn't initialise buffer converter: %d", err); return err; } @@ -1413,17 +1420,17 @@ static int gk20a_buffer_convert_gpu_to_cde_v1( g->ops.cde.get_program_numbers(g, block_height_log2, &hprog, &vprog); else { - gk20a_warn(g->dev, "cde: chip not supported"); + nvgpu_warn(g, "cde: chip not supported"); return -ENOSYS; } if (hprog < 0 || vprog < 0) { - gk20a_warn(g->dev, "cde: could not determine programs"); + nvgpu_warn(g, "cde: could not determine programs"); return -ENOSYS; } if (xtiles > 8192 / 8 || ytiles > 8192 / 8) - gk20a_warn(g->dev, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)", + nvgpu_warn(g, "cde: surface is exceptionally large (xtiles=%d, ytiles=%d)", xtiles, ytiles); gk20a_dbg(gpu_dbg_cde, "w=%d, h=%d, bh_log2=%d, compbits_hoffset=0x%llx, compbits_voffset=0x%llx, scatterbuffer_offset=0x%llx", @@ -1541,7 +1548,7 @@ static int gk20a_buffer_convert_gpu_to_cde( width, height, block_height_log2, submit_flags, fence_in, state); } else { - dev_err(dev_from_gk20a(g), "unsupported CDE firmware version %d", + nvgpu_err(g, "unsupported CDE firmware version %d", g->cde_app.firmware_version); err = -EINVAL; } @@ -1628,13 +1635,13 @@ int gk20a_mark_compressible_write(struct gk20a *g, u32 buffer_fd, dmabuf = dma_buf_get(buffer_fd); if (IS_ERR(dmabuf)) { - dev_err(dev_from_gk20a(g), "invalid dmabuf"); + nvgpu_err(g, "invalid dmabuf"); return -EINVAL; } err = gk20a_dmabuf_get_state(dmabuf, dev_from_gk20a(g), offset, &state); if (err) { - dev_err(dev_from_gk20a(g), "could not get state from dmabuf"); + nvgpu_err(g, "could not get state from dmabuf"); dma_buf_put(dmabuf); return err; } diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c index f3ac28ea..c502add5 100644 --- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c @@ -31,6 +31,8 @@ #include "gk20a.h" #include "debug_gk20a.h" +#include + #include #include #include @@ -459,7 +461,7 @@ u32 
gk20a_ce_create_context_with_cb(struct device *dev, runlist_id, true); if (!ce_ctx->ch) { - gk20a_err(ce_ctx->dev, "ce: gk20a channel not available"); + nvgpu_err(g, "ce: gk20a channel not available"); goto end; } ce_ctx->ch->wdt_enabled = false; @@ -467,21 +469,21 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev, /* bind the channel to the vm */ err = __gk20a_vm_bind_channel(&g->mm.ce.vm, ce_ctx->ch); if (err) { - gk20a_err(ce_ctx->dev, "ce: could not bind vm"); + nvgpu_err(g, "ce: could not bind vm"); goto end; } /* allocate gpfifo (1024 should be more than enough) */ err = gk20a_channel_alloc_gpfifo(ce_ctx->ch, 1024, 0, 0); if (err) { - gk20a_err(ce_ctx->dev, "ce: unable to allocate gpfifo"); + nvgpu_err(g, "ce: unable to allocate gpfifo"); goto end; } /* allocate command buffer (4096 should be more than enough) from sysmem*/ err = nvgpu_dma_alloc_map_sys(ce_ctx->vm, NVGPU_CE_COMMAND_BUF_SIZE, &ce_ctx->cmd_buf_mem); if (err) { - gk20a_err(ce_ctx->dev, + nvgpu_err(g, "ce: could not allocate command buffer for CE context"); goto end; } @@ -492,7 +494,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev, if (priority != -1) { err = gk20a_fifo_set_priority(ce_ctx->ch, priority); if (err) { - gk20a_err(ce_ctx->dev, + nvgpu_err(g, "ce: could not set the channel priority for CE context"); goto end; } @@ -502,7 +504,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev, if (timeslice != -1) { err = gk20a_fifo_set_timeslice(ce_ctx->ch, timeslice); if (err) { - gk20a_err(ce_ctx->dev, + nvgpu_err(g, "ce: could not set the channel timeslice value for CE context"); goto end; } @@ -512,7 +514,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev, if (runlist_level != -1) { err = gk20a_channel_set_runlist_interleave(ce_ctx->ch, runlist_level); if (err) { - gk20a_err(ce_ctx->dev, + nvgpu_err(g, "ce: could not set the runlist interleave for CE context"); goto end; } diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c index 94d193ed..c684be1f 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "gk20a.h" #include "debug_gk20a.h" @@ -301,7 +302,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch) } while (!nvgpu_timeout_expired(&timeout)); if (!channel_idle) { - gk20a_err(dev_from_gk20a(ch->g), "jobs not freed for channel %d\n", + nvgpu_err(ch->g, "jobs not freed for channel %d\n", ch->hw_chid); return -EBUSY; } @@ -322,7 +323,7 @@ int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch, int ret; if (gk20a_is_channel_marked_as_tsg(ch)) { - gk20a_err(dev_from_gk20a(g), "invalid operation for TSG!\n"); + nvgpu_err(g, "invalid operation for TSG!\n"); return -EINVAL; } @@ -362,7 +363,7 @@ void gk20a_set_error_notifier_locked(struct channel_gk20a *ch, __u32 error) ch->error_notifier->info32 = error; ch->error_notifier->status = 0xffff; - gk20a_err(dev_from_gk20a(ch->g), + nvgpu_err(ch->g, "error notifier set to %d for ch %d", error, ch->hw_chid); } } @@ -398,7 +399,7 @@ static void gk20a_wait_until_counter_is_N( msecs_to_jiffies(5000)) > 0) break; - gk20a_warn(dev_from_gk20a(ch->g), + nvgpu_warn(ch->g, "%s: channel %d, still waiting, %s left: %d, waiting for: %d", caller, ch->hw_chid, counter_name, atomic_read(counter), wait_value); @@ -476,7 +477,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force) nvgpu_spinlock_acquire(&ch->ref_obtain_lock); if (!ch->referenceable) { 
nvgpu_spinlock_release(&ch->ref_obtain_lock); - gk20a_err(dev_from_gk20a(ch->g), + nvgpu_err(ch->g, "Extra %s() called to channel %u", __func__, ch->hw_chid); return; @@ -795,7 +796,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g, ch = allocate_channel(f); if (ch == NULL) { /* TBD: we want to make this virtualizable */ - gk20a_err(dev_from_gk20a(g), "out of hw chids"); + nvgpu_err(g, "out of hw chids"); return NULL; } @@ -813,7 +814,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g, if (g->ops.fifo.alloc_inst(g, ch)) { ch->g = NULL; free_channel(f, ch); - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to open gk20a channel, out of inst mem"); return NULL; } @@ -873,7 +874,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g, used for inserting commands before/after user submitted buffers. */ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c) { - struct device *d = dev_from_gk20a(c->g); + struct gk20a *g = c->g; struct vm_gk20a *ch_vm = c->vm; struct priv_cmd_queue *q = &c->priv_cmd_q; u32 size; @@ -901,7 +902,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c) err = nvgpu_dma_alloc_map_sys(ch_vm, size, &q->mem); if (err) { - gk20a_err(d, "%s: memory allocation failed\n", __func__); + nvgpu_err(g, "%s: memory allocation failed\n", __func__); goto clean_up; } @@ -938,7 +939,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size, gk20a_dbg_fn("size %d", orig_size); if (!e) { - gk20a_err(dev_from_gk20a(c->g), + nvgpu_err(c->g, "ch %d: priv cmd entry is null", c->hw_chid); return -EINVAL; @@ -1016,7 +1017,7 @@ static int channel_gk20a_alloc_job(struct channel_gk20a *c, if (CIRC_SPACE(put, get, c->joblist.pre_alloc.length)) *job_out = &c->joblist.pre_alloc.jobs[put]; else { - gk20a_warn(dev_from_gk20a(c->g), + nvgpu_warn(c->g, "out of job ringbuffer space\n"); err = -EAGAIN; } @@ -1231,7 +1232,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c, /* an address space needs to have been bound at this point. */ if (!gk20a_channel_as_bound(c)) { - gk20a_err(d, + nvgpu_err(g, "not bound to an address space at time of gpfifo" " allocation."); return -EINVAL; @@ -1239,7 +1240,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c, ch_vm = c->vm; if (c->gpfifo.mem.size) { - gk20a_err(d, "channel %d :" + nvgpu_err(g, "channel %d :" "gpfifo already allocated", c->hw_chid); return -EEXIST; } @@ -1248,7 +1249,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c, gpfifo_size * sizeof(struct nvgpu_gpfifo), &c->gpfifo.mem); if (err) { - gk20a_err(d, "%s: memory allocation failed\n", __func__); + nvgpu_err(g, "%s: memory allocation failed\n", __func__); goto clean_up; } @@ -1334,7 +1335,7 @@ clean_up_unmap: nvgpu_dma_unmap_free(ch_vm, &c->gpfifo.mem); clean_up: memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc)); - gk20a_err(d, "fail"); + nvgpu_err(g, "fail"); return err; } @@ -1607,7 +1608,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch) return; } - gk20a_err(dev_from_gk20a(g), "Job on channel %d timed out", + nvgpu_err(g, "Job on channel %d timed out", ch->hw_chid); gk20a_debug_dump(g->dev); @@ -1761,7 +1762,7 @@ static void gk20a_channel_worker_process(struct gk20a *g, int *get) * other reasons than a channel added in the items list * currently, so warn and ack the message. 
*/ - gk20a_warn(g->dev, "Spurious worker event!"); + nvgpu_warn(g, "Spurious worker event!"); ++*get; break; } @@ -1820,7 +1821,7 @@ int nvgpu_channel_worker_init(struct gk20a *g) task = kthread_run(gk20a_channel_poll_worker, g, "nvgpu_channel_poll_%s", g->name); if (IS_ERR(task)) { - gk20a_err(g->dev, "failed to start channel poller thread"); + nvgpu_err(g, "failed to start channel poller thread"); return PTR_ERR(task); } g->channel_worker.poll_task = task; @@ -1853,7 +1854,7 @@ void gk20a_channel_worker_enqueue(struct channel_gk20a *ch) * one ref already, so can't fail. */ if (WARN_ON(!gk20a_channel_get(ch))) { - gk20a_warn(g->dev, "cannot get ch ref for worker!"); + nvgpu_warn(g, "cannot get ch ref for worker!"); return; } @@ -1876,7 +1877,7 @@ void gk20a_channel_worker_enqueue(struct channel_gk20a *ch) int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e) { struct priv_cmd_queue *q = &c->priv_cmd_q; - struct device *d = dev_from_gk20a(c->g); + struct gk20a *g = c->g; if (!e) return 0; @@ -1885,7 +1886,7 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e) /* read the entry's valid flag before reading its contents */ rmb(); if ((q->get != e->off) && e->off != 0) - gk20a_err(d, "requests out-of-order, ch=%d\n", + nvgpu_err(g, "requests out-of-order, ch=%d\n", c->hw_chid); q->get = e->off + e->size; } @@ -2416,7 +2417,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c, * So, add extra_entries in user request. Also, HW with fifo size N * can accept only N-1 entreis and so the below condition */ if (c->gpfifo.entry_num - 1 < num_entries + extra_entries) { - gk20a_err(d, "not enough gpfifo space allocated"); + nvgpu_err(g, "not enough gpfifo space allocated"); return -ENOMEM; } @@ -2430,7 +2431,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c, /* an address space needs to have been bound at this point. 
*/ if (!gk20a_channel_as_bound(c)) { - gk20a_err(d, + nvgpu_err(g, "not bound to an address space at time of gpfifo" " submission."); return -EINVAL; @@ -2512,7 +2513,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c, /* released by job cleanup via syncpt or sema interrupt */ err = gk20a_busy(g); if (err) { - gk20a_err(d, "failed to host gk20a to submit gpfifo, process %s", + nvgpu_err(g, "failed to host gk20a to submit gpfifo, process %s", current->comm); return err; } diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c index fc5862e1..fbeb1e4a 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c @@ -20,6 +20,7 @@ #include #include +#include #include "channel_sync_gk20a.h" #include "gk20a.h" @@ -65,8 +66,7 @@ static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s, int err = 0; if (!nvhost_syncpt_is_valid_pt_ext(sp->host1x_pdev, id)) { - dev_warn(dev_from_gk20a(c->g), - "invalid wait id in gpfifo submit, elided"); + nvgpu_warn(c->g, "invalid wait id in gpfifo submit, elided"); return 0; } @@ -75,7 +75,7 @@ static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s, err = gk20a_channel_alloc_priv_cmdbuf(c, 4, wait_cmd); if (err) { - gk20a_err(dev_from_gk20a(c->g), + nvgpu_err(c->g, "not enough priv cmd buffer space"); return err; } @@ -131,7 +131,7 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd, err = gk20a_channel_alloc_priv_cmdbuf(c, 4 * num_wait_cmds, wait_cmd); if (err) { - gk20a_err(dev_from_gk20a(c->g), + nvgpu_err(c->g, "not enough priv cmd buffer space"); sync_fence_put(sync_fence); return err; @@ -360,7 +360,7 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c) c->hw_chid, syncpt_name); if (!sp->id) { nvgpu_kfree(c->g, sp); - gk20a_err(c->g->dev, "failed to get free syncpt"); + nvgpu_err(c->g, "failed to get free syncpt"); return NULL; } @@ -501,7 +501,7 @@ static void gk20a_channel_semaphore_launcher( fence, fence->name); err = sync_fence_wait(fence, -1); if (err < 0) - dev_err(g->dev, "error waiting pre-fence: %d\n", err); + nvgpu_err(g, "error waiting pre-fence: %d\n", err); gk20a_dbg_info( "wait completed (%d) for fence %p '%s', triggering gpu work", @@ -594,8 +594,8 @@ static int gk20a_channel_semaphore_wait_syncpt( { struct gk20a_channel_semaphore *sema = container_of(s, struct gk20a_channel_semaphore, ops); - struct device *dev = dev_from_gk20a(sema->c->g); - gk20a_err(dev, "trying to use syncpoint synchronization"); + struct gk20a *g = sema->c->g; + nvgpu_err(g, "trying to use syncpoint synchronization"); return -ENODEV; } @@ -707,7 +707,7 @@ static int gk20a_channel_semaphore_wait_fd( err = gk20a_channel_alloc_priv_cmdbuf(c, 8, wait_cmd); if (err) { - gk20a_err(dev_from_gk20a(c->g), + nvgpu_err(c->g, "not enough priv cmd buffer space"); goto clean_up_sync_fence; } @@ -724,7 +724,7 @@ static int gk20a_channel_semaphore_wait_fd( w->ch = c; w->sema = nvgpu_semaphore_alloc(c); if (!w->sema) { - gk20a_err(dev_from_gk20a(c->g), "ran out of semaphores"); + nvgpu_err(c->g, "ran out of semaphores"); err = -ENOMEM; goto clean_up_worker; } @@ -779,7 +779,7 @@ clean_up_sync_fence: sync_fence_put(sync_fence); return err; #else - gk20a_err(dev_from_gk20a(c->g), + nvgpu_err(c->g, "trying to use sync fds with CONFIG_SYNC disabled"); return -ENODEV; #endif @@ -801,7 +801,7 @@ static int __gk20a_channel_semaphore_incr( semaphore = nvgpu_semaphore_alloc(c); if (!semaphore) { - 
gk20a_err(dev_from_gk20a(c->g), + nvgpu_err(c->g, "ran out of semaphores"); return -ENOMEM; } @@ -809,7 +809,7 @@ static int __gk20a_channel_semaphore_incr( incr_cmd_size = 10; err = gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, incr_cmd); if (err) { - gk20a_err(dev_from_gk20a(c->g), + nvgpu_err(c->g, "not enough priv cmd buffer space"); goto clean_up_sema; } @@ -889,7 +889,7 @@ static int gk20a_channel_semaphore_incr_user( #else struct gk20a_channel_semaphore *sema = container_of(s, struct gk20a_channel_semaphore, ops); - gk20a_err(dev_from_gk20a(sema->c->g), + nvgpu_err(sema->c->g, "trying to use sync fds with CONFIG_SYNC disabled"); return -ENODEV; #endif diff --git a/drivers/gpu/nvgpu/gk20a/clk_gk20a.c b/drivers/gpu/nvgpu/gk20a/clk_gk20a.c index 38d4b555..443cd5e1 100644 --- a/drivers/gpu/nvgpu/gk20a/clk_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/clk_gk20a.c @@ -24,6 +24,8 @@ #include "gk20a.h" +#include + #include #include @@ -251,7 +253,7 @@ static int clk_slide_gpc_pll(struct gk20a *g, u32 n) gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r()); if (ramp_timeout <= 0) { - gk20a_err(dev_from_gk20a(g), "gpcpll dynamic ramp timeout"); + nvgpu_err(g, "gpcpll dynamic ramp timeout"); return -ETIMEDOUT; } return 0; @@ -439,7 +441,7 @@ static int gk20a_init_clk_setup_sw(struct gk20a *g) ref = clk_get_parent(clk_get_parent(clk->tegra_clk)); if (IS_ERR(ref)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to get GPCPLL reference clock"); err = -EINVAL; goto fail; @@ -449,7 +451,7 @@ static int gk20a_init_clk_setup_sw(struct gk20a *g) clk->gpc_pll.id = GK20A_GPC_PLL; clk->gpc_pll.clk_in = ref_rate / KHZ; if (clk->gpc_pll.clk_in == 0) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "GPCPLL reference clock is zero"); err = -EINVAL; goto fail; @@ -508,7 +510,7 @@ static int set_pll_target(struct gk20a *g, u32 freq, u32 old_freq) /* gpc_pll.freq is changed to new value here */ if (clk_config_pll(clk, &clk->gpc_pll, &gpc_pll_params, &freq, true)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to set pll target for %d", freq); return -EINVAL; } @@ -536,8 +538,7 @@ static int set_pll_freq(struct gk20a *g, u32 freq, u32 old_freq) /* Just report error but not restore PLL since dvfs could already change voltage even when it returns error. 
*/ if (err) - gk20a_err(dev_from_gk20a(g), - "failed to set pll to %d", freq); + nvgpu_err(g, "failed to set pll to %d", freq); return err; } diff --git a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c index e5910e7f..76237e03 100644 --- a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c @@ -27,6 +27,8 @@ #include "gk20a.h" #include "css_gr_gk20a.h" +#include + #include #include @@ -299,8 +301,7 @@ static int css_gr_flush_snapshots(struct channel_gk20a *ch) cur->snapshot->hw_overflow_events_occured++; } - gk20a_warn(dev_from_gk20a(g), - "cyclestats: hardware overflow detected\n"); + nvgpu_warn(g, "cyclestats: hardware overflow detected"); } /* process all items in HW buffer */ @@ -340,8 +341,7 @@ static int css_gr_flush_snapshots(struct channel_gk20a *ch) dst_nxt = dst_head; } else { /* client not found - skipping this entry */ - gk20a_warn(dev_from_gk20a(g), - "cyclestats: orphaned perfmon %u\n", + nvgpu_warn(g, "cyclestats: orphaned perfmon %u", src->perfmon_id); goto next_hw_fifo_entry; } @@ -351,8 +351,7 @@ static int css_gr_flush_snapshots(struct channel_gk20a *ch) if (dst_nxt == dst_get) { /* no data copy, no pointer updates */ dst->sw_overflow_events_occured++; - gk20a_warn(dev_from_gk20a(g), - "cyclestats: perfmon %u soft overflow\n", + nvgpu_warn(g, "cyclestats: perfmon %u soft overflow", src->perfmon_id); } else { *dst_put = *src; @@ -392,8 +391,7 @@ next_hw_fifo_entry: /* not all entries proceed correctly. some of problems */ /* reported as overflows, some as orphaned perfmons, */ /* but it will be better notify with summary about it */ - gk20a_warn(dev_from_gk20a(g), - "cyclestats: completed %u from %u entries\n", + nvgpu_warn(g, "cyclestats: completed %u from %u entries", completed, pending); } diff --git a/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c index cc008844..b33845d1 100644 --- a/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c @@ -32,6 +32,8 @@ #include "gk20a.h" #include "gr_gk20a.h" +#include + #include #include @@ -601,7 +603,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g, write_idx = hdr->write_idx; if (write_idx >= dev->num_ents) { - gk20a_err(dev_from_gk20a(dev->g), + nvgpu_err(dev->g, "write_idx=%u out of range [0..%u]", write_idx, dev->num_ents); ret = -ENOSPC; diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c index d7f8ceba..bc3f67c4 100644 --- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c @@ -25,6 +25,7 @@ #include #include +#include #include "gk20a.h" #include "gr_gk20a.h" @@ -229,7 +230,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s, ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); if (!ch) { - gk20a_err(dev_from_gk20a(dbg_s->g), + nvgpu_err(dbg_s->g, "no channel bound to dbg session\n"); return -EINVAL; } @@ -248,7 +249,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s, break; default: - gk20a_err(dev_from_gk20a(dbg_s->g), + nvgpu_err(dbg_s->g, "unrecognized dbg gpu events ctrl cmd: 0x%x", args->cmd); ret = -EINVAL; @@ -402,7 +403,7 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s, break; default: - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "unrecognized dbg gpu timeout mode : 0x%x", timeout_mode); err = -EINVAL; @@ -742,7 +743,7 @@ static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state( write_size); 
nvgpu_mutex_release(&g->dbg_sessions_lock); if (err) { - gk20a_err(dev_from_gk20a(g), "copy_to_user failed!\n"); + nvgpu_err(g, "copy_to_user failed!\n"); return err; } @@ -1099,7 +1100,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd, break; default: - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "unrecognized dbg gpu ioctl cmd: 0x%x", cmd); err = -ENOTTY; @@ -1146,14 +1147,13 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, int err = 0, powergate_err = 0; bool is_pg_disabled = false; - struct device *dev = dbg_s->dev; struct gk20a *g = dbg_s->g; struct channel_gk20a *ch; gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops); if (args->num_ops > g->gpu_characteristics.reg_ops_limit) { - gk20a_err(dev, "regops limit exceeded"); + nvgpu_err(g, "regops limit exceeded"); return -EINVAL; } @@ -1163,25 +1163,25 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, } if (g->dbg_regops_tmp_buf_ops == 0 || !g->dbg_regops_tmp_buf) { - gk20a_err(dev, "reg ops work buffer not allocated"); + nvgpu_err(g, "reg ops work buffer not allocated"); return -ENODEV; } if (!dbg_s->id) { - gk20a_err(dev, "can't call reg_ops on an unbound debugger session"); + nvgpu_err(g, "can't call reg_ops on an unbound debugger session"); return -EINVAL; } ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); if (!dbg_s->is_profiler && !ch) { - gk20a_err(dev, "bind a channel before regops for a debugging session"); + nvgpu_err(g, "bind a channel before regops for a debugging session"); return -EINVAL; } /* be sure that ctx info is in place */ if (!gk20a_gpu_is_virtual(dbg_s->dev) && !gr_context_info_available(dbg_s, &g->gr)) { - gk20a_err(dev, "gr context data not available\n"); + nvgpu_err(g, "gr context data not available\n"); return -ENODEV; } @@ -1221,7 +1221,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, if (copy_from_user(g->dbg_regops_tmp_buf, fragment, fragment_size)) { - dev_err(dev, "copy_from_user failed!"); + nvgpu_err(g, "copy_from_user failed!"); err = -EFAULT; break; } @@ -1233,7 +1233,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, if (copy_to_user(fragment, g->dbg_regops_tmp_buf, fragment_size)) { - dev_err(dev, "copy_to_user failed!"); + nvgpu_err(g, "copy_to_user failed!"); err = -EFAULT; break; } @@ -1255,7 +1255,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, err = powergate_err; if (err) - gk20a_err(dev, "dbg regops failed"); + nvgpu_err(g, "dbg regops failed"); return err; } @@ -1350,7 +1350,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode) break; default: - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "unrecognized dbg gpu powergate mode: 0x%x", powermode); err = -ENOTTY; @@ -1388,7 +1388,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s, err = gk20a_busy(g); if (err) { - gk20a_err(dev_from_gk20a(g), "failed to poweron"); + nvgpu_err(g, "failed to poweron"); return err; } @@ -1397,7 +1397,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s, ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s); if (!ch_gk20a) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "no bound channel for smpc ctxsw mode update\n"); err = -EINVAL; goto clean_up; @@ -1406,7 +1406,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s, err = g->ops.gr.update_smpc_ctxsw_mode(g, ch_gk20a, args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW); 
if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "error (%d) during smpc ctxsw mode update\n", err); goto clean_up; } @@ -1434,13 +1434,13 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s, * cleaned up. */ if (!dbg_s->has_profiler_reservation) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "session doesn't have a valid reservation"); } err = gk20a_busy(g); if (err) { - gk20a_err(dev_from_gk20a(g), "failed to poweron"); + nvgpu_err(g, "failed to poweron"); return err; } @@ -1449,7 +1449,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s, ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s); if (!ch_gk20a) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "no bound channel for pm ctxsw mode update\n"); err = -EINVAL; goto clean_up; @@ -1458,7 +1458,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s, err = g->ops.gr.update_hwpm_ctxsw_mode(g, ch_gk20a, args->mode == NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW); if (err) - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "error (%d) during pm ctxsw mode update\n", err); /* gk20a would require a WAR to set the core PM_ENABLE bit, not @@ -1486,7 +1486,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm( err = gk20a_busy(g); if (err) { - gk20a_err(dev_from_gk20a(g), "failed to poweron"); + nvgpu_err(g, "failed to poweron"); return err; } @@ -1495,7 +1495,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm( /* Suspend GPU context switching */ err = gr_gk20a_disable_ctxsw(g); if (err) { - gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw"); + nvgpu_err(g, "unable to stop gr ctxsw"); /* this should probably be ctx-fatal... */ goto clean_up; } @@ -1512,7 +1512,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm( err = gr_gk20a_enable_ctxsw(g); if (err) - gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n"); + nvgpu_err(g, "unable to restart ctxsw!\n"); clean_up: nvgpu_mutex_release(&g->dbg_sessions_lock); @@ -1544,7 +1544,7 @@ static int nvgpu_ioctl_allocate_profiler_object( else { prof_obj->ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); if (prof_obj->ch == NULL) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "bind a channel for dbg session"); nvgpu_kfree(g, prof_obj); err = -EINVAL; @@ -1582,7 +1582,7 @@ static int nvgpu_ioctl_free_profiler_object( dbg_profiler_object_data, prof_obj_entry) { if (prof_obj->prof_handle == args->profiler_handle) { if (prof_obj->session_id != dbg_s->id) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid handle %x", args->profiler_handle); err = -EINVAL; @@ -1598,7 +1598,7 @@ static int nvgpu_ioctl_free_profiler_object( } } if (!obj_found) { - gk20a_err(dev_from_gk20a(g), "profiler %x not found", + nvgpu_err(g, "profiler %x not found", args->profiler_handle); err = -EINVAL; } @@ -1618,7 +1618,7 @@ static struct dbg_profiler_object_data *find_matching_prof_obj( dbg_profiler_object_data, prof_obj_entry) { if (prof_obj->prof_handle == profiler_handle) { if (prof_obj->session_id != dbg_s->id) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid handle %x", profiler_handle); return NULL; @@ -1667,7 +1667,7 @@ static void nvgpu_release_profiler_reservation(struct dbg_session_gk20a *dbg_s, g->profiler_reservation_count--; if (g->profiler_reservation_count < 0) - gk20a_err(dev_from_gk20a(g), "Negative reservation count!"); + nvgpu_err(g, "Negative reservation count!"); dbg_s->has_profiler_reservation = false; prof_obj->has_reservation = false; if (prof_obj->ch == NULL) @@ -1684,7 +1684,7 @@ static int 
nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle); if (g->profiler_reservation_count < 0) { - gk20a_err(dev_from_gk20a(g), "Negative reservation count!"); + nvgpu_err(g, "Negative reservation count!"); return -EINVAL; } @@ -1694,7 +1694,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, my_prof_obj = find_matching_prof_obj(dbg_s, profiler_handle); if (!my_prof_obj) { - gk20a_err(dev_from_gk20a(g), "object not found"); + nvgpu_err(g, "object not found"); err = -EINVAL; goto exit; } @@ -1711,7 +1711,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, */ if (!g->ops.dbg_session_ops.check_and_set_global_reservation( dbg_s, my_prof_obj)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "global reserve: have existing reservation"); err = -EBUSY; } @@ -1719,7 +1719,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, /* If there's a global reservation, * we can't take a per-context one. */ - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "per-ctxt reserve: global reservation in effect"); err = -EBUSY; } else if (gk20a_is_channel_marked_as_tsg(my_prof_obj->ch)) { @@ -1732,7 +1732,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, dbg_profiler_object_data, prof_obj_entry) { if (prof_obj->has_reservation && (prof_obj->ch->tsgid == my_tsgid)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "per-ctxt reserve (tsg): already reserved"); err = -EBUSY; goto exit; @@ -1742,7 +1742,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, if (!g->ops.dbg_session_ops.check_and_set_context_reservation( dbg_s, my_prof_obj)) { /* Another guest OS has the global reservation */ - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "per-ctxt reserve: global reservation in effect"); err = -EBUSY; } @@ -1756,7 +1756,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, dbg_profiler_object_data, prof_obj_entry) { if (prof_obj->has_reservation && (prof_obj->ch == my_ch)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "per-ctxt reserve (ch): already reserved"); err = -EBUSY; goto exit; @@ -1766,7 +1766,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, if (!g->ops.dbg_session_ops.check_and_set_context_reservation( dbg_s, my_prof_obj)) { /* Another guest OS has the global reservation */ - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "per-ctxt reserve: global reservation in effect"); err = -EBUSY; } @@ -1791,7 +1791,7 @@ static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s, prof_obj = find_matching_prof_obj(dbg_s, profiler_handle); if (!prof_obj) { - gk20a_err(dev_from_gk20a(g), "object not found"); + nvgpu_err(g, "object not found"); err = -EINVAL; goto exit; } @@ -1799,7 +1799,7 @@ static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s, if (prof_obj->has_reservation) g->ops.dbg_session_ops.release_profiler_reservation(dbg_s, prof_obj); else { - gk20a_err(dev_from_gk20a(g), "No reservation found"); + nvgpu_err(g, "No reservation found"); err = -EINVAL; goto exit; } @@ -1854,7 +1854,7 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s, err = gk20a_busy(g); if (err) { - gk20a_err(dev_from_gk20a(g), "failed to poweron"); + nvgpu_err(g, "failed to poweron"); goto fail_unmap; } @@ -1895,7 +1895,7 @@ static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s, err = gk20a_busy(g); if (err) { - 
gk20a_err(dev_from_gk20a(g), "failed to poweron"); + nvgpu_err(g, "failed to poweron"); return err; } diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c index 5724be72..85b24f2e 100644 --- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "gk20a.h" #include "debug_gk20a.h" @@ -145,7 +146,7 @@ static int gk20a_gr_debug_show(struct seq_file *s, void *unused) err = gk20a_busy(g); if (err) { - gk20a_err(dev, "failed to power on gpu: %d", err); + nvgpu_err(g, "failed to power on gpu: %d", err); return -EINVAL; } @@ -186,7 +187,7 @@ static int gk20a_debug_show(struct seq_file *s, void *unused) err = gk20a_busy(g); if (err) { - gk20a_err(g->dev, "failed to power on gpu: %d", err); + nvgpu_err(g, "failed to power on gpu: %d", err); return -EFAULT; } diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c index 96b94ea7..ad228a8c 100644 --- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c @@ -31,6 +31,8 @@ #include "gk20a.h" #include "gr_gk20a.h" +#include + #include #include @@ -156,7 +158,7 @@ static int gk20a_fecs_trace_hash_add(struct gk20a *g, u32 context_ptr, pid_t pid he = nvgpu_kzalloc(g, sizeof(*he)); if (unlikely(!he)) { - gk20a_warn(dev_from_gk20a(g), + nvgpu_warn(g, "can't alloc new hash entry for context_ptr=%x pid=%d", context_ptr, pid); return -ENOMEM; @@ -255,7 +257,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index) "consuming record trace=%p read=%d record=%p", trace, index, r); if (unlikely(!gk20a_fecs_trace_is_valid_record(r))) { - gk20a_warn(dev_from_gk20a(g), + nvgpu_warn(g, "trace=%p read=%d record=%p magic_lo=%08x magic_hi=%08x (invalid)", trace, index, r, r->magic_lo, r->magic_hi); return -EINVAL; @@ -342,7 +344,7 @@ static int gk20a_fecs_trace_poll(struct gk20a *g) nvgpu_mutex_acquire(&trace->poll_lock); write = gk20a_fecs_trace_get_write_index(g); if (unlikely((write < 0) || (write >= GK20A_FECS_TRACE_NUM_RECORDS))) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to acquire write index, write=%d", write); err = write; goto done; @@ -571,7 +573,7 @@ static int gk20a_fecs_trace_init(struct gk20a *g) trace = nvgpu_kzalloc(g, sizeof(struct gk20a_fecs_trace)); if (!trace) { - gk20a_warn(dev_from_gk20a(g), "failed to allocate fecs_trace"); + nvgpu_warn(g, "failed to allocate fecs_trace"); return -ENOMEM; } g->fecs_trace = trace; @@ -586,7 +588,7 @@ static int gk20a_fecs_trace_init(struct gk20a *g) BUG_ON(!is_power_of_2(GK20A_FECS_TRACE_NUM_RECORDS)); err = gk20a_fecs_trace_alloc_ring(g); if (err) { - gk20a_warn(dev_from_gk20a(g), "failed to allocate FECS ring"); + nvgpu_warn(g, "failed to allocate FECS ring"); goto clean_hash_lock; } @@ -754,7 +756,7 @@ static int gk20a_fecs_trace_enable(struct gk20a *g) task = kthread_run(gk20a_fecs_trace_periodic_polling, g, __func__); if (unlikely(IS_ERR(task))) { - gk20a_warn(dev_from_gk20a(g), + nvgpu_warn(g, "failed to create FECS polling task"); return PTR_ERR(task); } diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index ca09c22a..48253e59 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "gk20a.h" #include "debug_gk20a.h" @@ -105,7 +106,7 @@ struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g, u32 e } if (!info) - 
gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id); + nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id); return info; } @@ -131,7 +132,7 @@ bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id) } if (!valid) - gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id); + nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id); return valid; } @@ -146,7 +147,7 @@ u32 gk20a_fifo_get_gr_engine_id(struct gk20a *g) 1, ENGINE_GR_GK20A); if (!gr_engine_cnt) { - gk20a_err(dev_from_gk20a(g), "No GR engine available on this device!\n"); + nvgpu_err(g, "No GR engine available on this device!\n"); } return gr_engine_id; @@ -218,7 +219,7 @@ u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g) 1, ENGINE_GR_GK20A); if (!gr_engine_cnt) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "No GR engine available on this device!"); goto end; } @@ -228,7 +229,7 @@ u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g) if (engine_info) { gr_runlist_id = engine_info->runlist_id; } else { - gk20a_err(g->dev, + nvgpu_err(g, "gr_engine_id is not in active list/invalid %d", gr_engine_id); } @@ -273,7 +274,7 @@ static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id) if (engine_info) { fault_id = engine_info->fault_id; } else { - gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id); + nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id); } return fault_id; } @@ -321,7 +322,6 @@ int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type, int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) { struct gk20a *g = f->g; - struct device *d = dev_from_gk20a(g); u32 i; u32 max_info_entries = top_device_info__size_1_v(); u32 engine_enum = ENGINE_INVAL_GK20A; @@ -375,7 +375,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) } if (!found_pbdma_for_runlist) { - gk20a_err(d, "busted pbdma map"); + nvgpu_err(g, "busted pbdma map"); return -EINVAL; } } @@ -647,7 +647,6 @@ static void fifo_engine_exception_status(struct gk20a *g, static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) { struct fifo_runlist_info_gk20a *runlist; - struct device *d = dev_from_gk20a(g); unsigned int runlist_id; u32 i; size_t runlist_size; @@ -689,7 +688,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) int err = nvgpu_dma_alloc_sys(g, runlist_size, &runlist->mem[i]); if (err) { - dev_err(d, "memory allocation failed\n"); + nvgpu_err(g, "memory allocation failed\n"); goto clean_up_runlist; } } @@ -888,7 +887,6 @@ static void gk20a_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f) static int gk20a_init_fifo_setup_sw(struct gk20a *g) { struct fifo_gk20a *f = &g->fifo; - struct device *d = dev_from_gk20a(g); unsigned int chid, i; int err = 0; @@ -948,7 +946,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g) err = nvgpu_dma_alloc_sys(g, f->userd_entry_size * f->num_channels, &f->userd); if (err) { - dev_err(d, "userd memory allocation failed\n"); + nvgpu_err(g, "userd memory allocation failed\n"); goto clean_up; } gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va); @@ -1032,7 +1030,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g) smp_mb(); if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) { - gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a: CPU wrote 0x%x, \ + nvgpu_err(g, "bar1 broken @ gk20a: CPU wrote 0x%x, \ GPU read 0x%x", *cpu_vaddr, gk20a_bar1_readl(g, bar1_vaddr)); return -EINVAL; } @@ -1040,14 +1038,14 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g) 
gk20a_bar1_writel(g, bar1_vaddr, v2); if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) { - gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a: GPU wrote 0x%x, \ + nvgpu_err(g, "bar1 broken @ gk20a: GPU wrote 0x%x, \ CPU read 0x%x", gk20a_bar1_readl(g, bar1_vaddr), *cpu_vaddr); return -EINVAL; } /* is it visible to the cpu? */ if (*cpu_vaddr != v2) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "cpu didn't see bar1 write @ %p!", cpu_vaddr); } @@ -1230,7 +1228,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id) } if (engine_enum == ENGINE_INVAL_GK20A) - gk20a_err(dev_from_gk20a(g), "unsupported engine_id %d", engine_id); + nvgpu_err(g, "unsupported engine_id %d", engine_id); if (engine_enum == ENGINE_GR_GK20A) { if (support_gk20a_pmu(g->dev) && g->elpg_enabled) @@ -1242,7 +1240,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id) g->ops.fecs_trace.reset(g); /*HALT_PIPELINE method, halt GR engine*/ if (gr_gk20a_halt_pipe(g)) - gk20a_err(dev_from_gk20a(g), "failed to HALT gr pipe"); + nvgpu_err(g, "failed to HALT gr pipe"); /* resetting engine using mc_enable_r() is not enough, we do full init sequence */ gk20a_gr_reset(g); @@ -1260,16 +1258,15 @@ static void gk20a_fifo_handle_chsw_fault(struct gk20a *g) u32 intr; intr = gk20a_readl(g, fifo_intr_chsw_error_r()); - gk20a_err(dev_from_gk20a(g), "chsw: %08x\n", intr); + nvgpu_err(g, "chsw: %08x\n", intr); gk20a_fecs_dump_falcon_stats(g); gk20a_writel(g, fifo_intr_chsw_error_r(), intr); } static void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g) { - struct device *dev = dev_from_gk20a(g); u32 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r()); - gk20a_err(dev, "dropped mmu fault (0x%08x)", fault_id); + nvgpu_err(g, "dropped mmu fault (0x%08x)", fault_id); } bool gk20a_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid) @@ -1381,7 +1378,7 @@ bool gk20a_fifo_error_tsg(struct gk20a *g, void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g, struct channel_gk20a *refch) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "channel %d generated a mmu fault", refch->hw_chid); gk20a_set_error_notifier(refch, NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT); @@ -1392,7 +1389,7 @@ void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g, { struct channel_gk20a *ch = NULL; - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "TSG %d generated a mmu fault", tsg->tsgid); down_read(&tsg->ch_list_lock); @@ -1544,7 +1541,7 @@ static bool gk20a_fifo_handle_mmu_fault( f.engine_subid_desc, f.client_desc, f.fault_type_desc); - gk20a_err(dev_from_gk20a(g), "%s mmu fault on engine %d, " + nvgpu_err(g, "%s mmu fault on engine %d, " "engine subid %d (%s), client %d (%s), " "addr 0x%08x:0x%08x, type %d (%s), info 0x%08x," "inst_ptr 0x%llx\n", @@ -1558,7 +1555,7 @@ static bool gk20a_fifo_handle_mmu_fault( if (ctxsw) { gk20a_fecs_dump_falcon_stats(g); - gk20a_err(dev_from_gk20a(g), "gr_status_r : 0x%x", + nvgpu_err(g, "gr_status_r : 0x%x", gk20a_readl(g, gr_status_r())); } @@ -1654,18 +1651,18 @@ static bool gk20a_fifo_handle_mmu_fault( gk20a_channel_abort(ch, false); gk20a_channel_put(ch); } else { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "mmu error in freed channel %d", ch->hw_chid); } } else if (f.inst_ptr == gk20a_mm_inst_block_addr(g, &g->mm.bar1.inst_block)) { - gk20a_err(dev_from_gk20a(g), "mmu fault from bar1"); + nvgpu_err(g, "mmu fault from bar1"); } else if (f.inst_ptr == gk20a_mm_inst_block_addr(g, &g->mm.pmu.inst_block)) { - gk20a_err(dev_from_gk20a(g), "mmu fault from pmu"); + nvgpu_err(g, "mmu fault from pmu"); } else - 
gk20a_err(dev_from_gk20a(g), "couldn't locate channel for mmu fault"); + nvgpu_err(g, "couldn't locate channel for mmu fault"); } /* clear interrupt */ @@ -2137,7 +2134,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g) /* could not find the engine - should never happen */ if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) { - gk20a_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, failed to find engine\n", + nvgpu_err(g, "fifo sched error : 0x%08x, failed to find engine\n", sched_error); ret = false; goto err; @@ -2158,7 +2155,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g) } if (ret) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fifo sched ctxsw timeout error: " "engine=%u, %s=%d, ms=%u", engine_id, is_tsg ? "tsg" : "ch", id, ms); @@ -2175,7 +2172,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g) "%s=%d", ms, is_tsg ? "tsg" : "ch", id); } } else { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fifo sched error : 0x%08x, engine=%u, %s=%d", sched_error, engine_id, is_tsg ? "tsg" : "ch", id); } @@ -2187,7 +2184,6 @@ err: static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) { bool print_channel_reset_log = false; - struct device *dev = dev_from_gk20a(g); u32 handled = 0; gk20a_dbg_fn("fifo_intr=0x%08x", fifo_intr); @@ -2195,13 +2191,13 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) if (fifo_intr & fifo_intr_0_pio_error_pending_f()) { /* pio mode is unused. this shouldn't happen, ever. */ /* should we clear it or just leave it pending? */ - gk20a_err(dev, "fifo pio error!\n"); + nvgpu_err(g, "fifo pio error!\n"); BUG_ON(1); } if (fifo_intr & fifo_intr_0_bind_error_pending_f()) { u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r()); - gk20a_err(dev, "fifo bind error: 0x%08x", bind_error); + nvgpu_err(g, "fifo bind error: 0x%08x", bind_error); print_channel_reset_log = true; handled |= fifo_intr_0_bind_error_pending_f(); } @@ -2233,7 +2229,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) if (print_channel_reset_log) { unsigned int engine_id; - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "channel reset initiated from %s; intr=0x%08x", __func__, fifo_intr); for (engine_id = 0; @@ -2301,8 +2297,7 @@ static bool gk20a_fifo_is_sw_method_subch(struct gk20a *g, int pbdma_id, return false; } -static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev, - struct gk20a *g, +static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, struct fifo_gk20a *f, u32 pbdma_id) { @@ -2323,7 +2318,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev, if ((f->intr.pbdma.device_fatal_0 | f->intr.pbdma.channel_fatal_0 | f->intr.pbdma.restartable_0) & pbdma_intr_0) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "pbdma_intr_0(%d):0x%08x PBH: %08x SHADOW: %08x M0: %08x %08x %08x %08x", pbdma_id, pbdma_intr_0, gk20a_readl(g, pbdma_pb_header_r(pbdma_id)), @@ -2346,7 +2341,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev, gk20a_writel(g, pbdma_acquire_r(pbdma_id), val); if (g->timeouts_enabled) { reset = true; - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "semaphore acquire timeout!"); } handled |= pbdma_intr_0_acquire_pending_f(); @@ -2387,7 +2382,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev, /* all intrs in _intr_1 are "host copy engine" related, * which gk20a doesn't have. for now just make them channel fatal. 
*/ if (pbdma_intr_1) { - dev_err(dev, "channel hce error: pbdma_intr_1(%d): 0x%08x", + nvgpu_err(g, "channel hce error: pbdma_intr_1(%d): 0x%08x", pbdma_id, pbdma_intr_1); reset = true; gk20a_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1); @@ -2428,7 +2423,6 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev, static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr) { - struct device *dev = dev_from_gk20a(g); struct fifo_gk20a *f = &g->fifo; u32 clear_intr = 0, i; u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); @@ -2438,7 +2432,7 @@ static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr) if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) { gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i); clear_intr |= - gk20a_fifo_handle_pbdma_intr(dev, g, f, i); + gk20a_fifo_handle_pbdma_intr(g, f, i); } } return fifo_intr_0_pbdma_intr_pending_f(); @@ -2534,7 +2528,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id, struct tsg_gk20a *tsg = &g->fifo.tsg[id]; struct channel_gk20a *ch = NULL; - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "preempt TSG %d timeout\n", id); down_read(&tsg->ch_list_lock); @@ -2550,7 +2544,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id, } else { struct channel_gk20a *ch = &g->fifo.channel[id]; - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "preempt channel %d timeout\n", id); if (gk20a_channel_get(ch)) { @@ -2733,7 +2727,7 @@ int gk20a_fifo_enable_all_engine_activity(struct gk20a *g) err = gk20a_fifo_enable_engine_activity(g, &g->fifo.engine_info[active_engine_id]); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to enable engine %d activity\n", active_engine_id); ret = err; } @@ -2806,7 +2800,7 @@ clean_up: if (err) { gk20a_dbg_fn("failed"); if (gk20a_fifo_enable_engine_activity(g, eng_info)) - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to enable gr engine activity\n"); } else { gk20a_dbg_fn("done"); @@ -3155,7 +3149,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, ret = gk20a_fifo_runlist_wait_pending(g, runlist_id); if (ret == -ETIMEDOUT) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "runlist update timeout"); gk20a_fifo_runlist_reset_engines(g, runlist_id); @@ -3167,10 +3161,10 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, * should be fine */ if (ret) - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "runlist update failed: %d", ret); } else if (ret == -EINTR) - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "runlist update interrupted"); } @@ -3196,7 +3190,7 @@ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 hw_chid, /* Capture the last failure error code */ errcode = g->ops.fifo.update_runlist(g, runlist_id, hw_chid, add, wait_for_finish); if (errcode) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to update_runlist %d %d", runlist_id, errcode); ret = errcode; } @@ -4051,8 +4045,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice) struct gk20a *g = ch->g; if (gk20a_is_channel_marked_as_tsg(ch)) { - gk20a_err(dev_from_gk20a(ch->g), - "invalid operation for TSG!\n"); + nvgpu_err(g, "invalid operation for TSG!\n"); return -EINVAL; } @@ -4071,8 +4064,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice) int gk20a_fifo_set_priority(struct channel_gk20a *ch, u32 priority) { if (gk20a_is_channel_marked_as_tsg(ch)) { - gk20a_err(dev_from_gk20a(ch->g), - "invalid operation for TSG!\n"); + nvgpu_err(ch->g, "invalid operation for TSG!\n"); 
return -EINVAL; } diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c index c8acf6f7..05e3c3f4 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gk20a.c @@ -282,7 +282,7 @@ static int gk20a_init_support(struct platform_device *dev) g->regs = gk20a_ioremap_resource(dev, GK20A_BAR0_IORESOURCE_MEM, &g->reg_mem); if (IS_ERR(g->regs)) { - dev_err(dev_from_gk20a(g), "failed to remap gk20a registers\n"); + nvgpu_err(g, "failed to remap gk20a registers\n"); err = PTR_ERR(g->regs); goto fail; } @@ -290,7 +290,7 @@ static int gk20a_init_support(struct platform_device *dev) g->bar1 = gk20a_ioremap_resource(dev, GK20A_BAR1_IORESOURCE_MEM, &g->bar1_mem); if (IS_ERR(g->bar1)) { - dev_err(dev_from_gk20a(g), "failed to remap gk20a bar1\n"); + nvgpu_err(g, "failed to remap gk20a bar1\n"); err = PTR_ERR(g->bar1); goto fail; } @@ -411,7 +411,7 @@ int gk20a_pm_finalize_poweron(struct device *dev) if (platform->busy) { err = platform->busy(dev); if (err < 0) { - dev_err(dev, "%s: failed to poweron platform dependency\n", + nvgpu_err(g, "%s: failed to poweron platform dependency\n", __func__); goto done; } @@ -467,7 +467,7 @@ int gk20a_pm_finalize_poweron(struct device *dev) if (g->ops.clk.init_clk_support) { err = g->ops.clk.init_clk_support(g); if (err) { - gk20a_err(dev, "failed to init gk20a clk"); + nvgpu_err(g, "failed to init gk20a clk"); goto done; } } @@ -475,7 +475,7 @@ int gk20a_pm_finalize_poweron(struct device *dev) err = g->ops.fifo.reset_enable_hw(g); if (err) { - gk20a_err(dev, "failed to reset gk20a fifo"); + nvgpu_err(g, "failed to reset gk20a fifo"); goto done; } @@ -484,13 +484,13 @@ int gk20a_pm_finalize_poweron(struct device *dev) err = gk20a_init_mm_support(g); if (err) { - gk20a_err(dev, "failed to init gk20a mm"); + nvgpu_err(g, "failed to init gk20a mm"); goto done; } err = gk20a_init_fifo_support(g); if (err) { - gk20a_err(dev, "failed to init gk20a fifo"); + nvgpu_err(g, "failed to init gk20a fifo"); goto done; } @@ -501,7 +501,7 @@ int gk20a_pm_finalize_poweron(struct device *dev) err = gk20a_enable_gr_hw(g); if (err) { - gk20a_err(dev, "failed to enable gr"); + nvgpu_err(g, "failed to enable gr"); goto done; } @@ -509,7 +509,7 @@ int gk20a_pm_finalize_poweron(struct device *dev) if (g->ops.pmu.prepare_ucode) err = g->ops.pmu.prepare_ucode(g); if (err) { - gk20a_err(dev, "failed to init pmu ucode"); + nvgpu_err(g, "failed to init pmu ucode"); goto done; } } @@ -518,7 +518,7 @@ int gk20a_pm_finalize_poweron(struct device *dev) if (g->ops.pmupstate) { err = gk20a_init_pstate_support(g); if (err) { - gk20a_err(dev, "failed to init pstates"); + nvgpu_err(g, "failed to init pstates"); goto done; } } @@ -527,21 +527,21 @@ int gk20a_pm_finalize_poweron(struct device *dev) if (g->ops.pmu.is_pmu_supported(g)) { err = gk20a_init_pmu_support(g); if (err) { - gk20a_err(dev, "failed to init gk20a pmu"); + nvgpu_err(g, "failed to init gk20a pmu"); goto done; } } err = gk20a_init_gr_support(g); if (err) { - gk20a_err(dev, "failed to init gk20a gr"); + nvgpu_err(g, "failed to init gk20a gr"); goto done; } if (g->ops.pmu.mclk_init) { err = g->ops.pmu.mclk_init(g); if (err) { - gk20a_err(dev, "failed to set mclk"); + nvgpu_err(g, "failed to set mclk"); /* Indicate error dont goto done */ } } @@ -550,37 +550,37 @@ int gk20a_pm_finalize_poweron(struct device *dev) if (g->ops.pmupstate) { err = gk20a_init_pstate_pmu_support(g); if (err) { - gk20a_err(dev, "failed to init pstates"); + nvgpu_err(g, "failed to init pstates"); goto done; } } err = 
nvgpu_clk_arb_init_arbiter(g); if (err) { - gk20a_err(dev, "failed to init clk arb"); + nvgpu_err(g, "failed to init clk arb"); goto done; } #endif err = gk20a_init_therm_support(g); if (err) { - gk20a_err(dev, "failed to init gk20a therm"); + nvgpu_err(g, "failed to init gk20a therm"); goto done; } err = g->ops.chip_init_gpu_characteristics(g); if (err) { - gk20a_err(dev, "failed to init gk20a gpu characteristics"); + nvgpu_err(g, "failed to init gk20a gpu characteristics"); goto done; } err = gk20a_ctxsw_trace_init(g); if (err) - gk20a_warn(dev, "could not initialize ctxsw tracing"); + nvgpu_warn(g, "could not initialize ctxsw tracing"); err = gk20a_sched_ctrl_init(g); if (err) { - gk20a_err(dev, "failed to init sched control"); + nvgpu_err(g, "failed to init sched control"); goto done; } @@ -619,7 +619,7 @@ int gk20a_pm_finalize_poweron(struct device *dev) speed = 1 << (fls(speed) - 1); err = g->ops.xve.set_speed(g, speed); if (err) { - gk20a_err(dev, "Failed to set PCIe bus speed!\n"); + nvgpu_err(g, "Failed to set PCIe bus speed!\n"); goto done; } } @@ -1312,7 +1312,7 @@ int __gk20a_do_idle(struct device *dev, bool force_reset) } while (ref_cnt != target_ref_cnt && !nvgpu_timeout_expired(&timeout)); if (ref_cnt != target_ref_cnt) { - gk20a_err(dev, "failed to idle - refcount %d != 1\n", + nvgpu_err(g, "failed to idle - refcount %d != 1\n", ref_cnt); goto fail_drop_usage_count; } @@ -1344,7 +1344,7 @@ int __gk20a_do_idle(struct device *dev, bool force_reset) if (is_railgated) { return 0; } else { - gk20a_err(dev, "failed to idle in timeout\n"); + nvgpu_err(g, "failed to idle in timeout\n"); goto fail_timeout; } } else { diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_scale.c b/drivers/gpu/nvgpu/gk20a/gk20a_scale.c index b411cb5c..06c73b90 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a_scale.c +++ b/drivers/gpu/nvgpu/gk20a/gk20a_scale.c @@ -31,6 +31,8 @@ #include "clk_gk20a.h" #include "gk20a_scale.h" +#include + /* * gk20a_scale_qos_notify() * @@ -59,8 +61,8 @@ int gk20a_scale_qos_notify(struct notifier_block *nb, pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS) * 1000; if (profile->qos_min_freq > profile->qos_max_freq) { - gk20a_err(g->dev, - "QoS: setting invalid limit, min_freq=%lu max_freq=%lu\n", + nvgpu_err(g, + "QoS: setting invalid limit, min_freq=%lu max_freq=%lu", profile->qos_min_freq, profile->qos_max_freq); profile->qos_min_freq = profile->qos_max_freq; } diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c index 96185ee7..712359e1 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c @@ -23,6 +23,7 @@ #include #include +#include #include "gk20a.h" #include "gr_ctx_gk20a.h" @@ -111,7 +112,6 @@ static bool gr_gk20a_is_firmware_defined(void) static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) { - struct device *d = dev_from_gk20a(g); const struct firmware *netlist_fw; struct netlist_image *netlist = NULL; char name[MAX_NETLIST_NAME]; @@ -135,13 +135,13 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) for (; net < max; net++) { if (g->ops.gr_ctx.get_netlist_name(g, net, name) != 0) { - gk20a_warn(d, "invalid netlist index %d", net); + nvgpu_warn(g, "invalid netlist index %d", net); continue; } netlist_fw = nvgpu_request_firmware(g, name, 0); if (!netlist_fw) { - gk20a_warn(d, "failed to load netlist %s", name); + nvgpu_warn(g, "failed to load netlist %s", name); continue; } @@ -436,7 +436,7 @@ done: gk20a_dbg_info("netlist image %s loaded", name); return 
0; } else { - gk20a_err(d, "failed to load netlist image!!"); + nvgpu_err(g, "failed to load netlist image!!"); return err; } } diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c index 2fdbc01a..12ec9c5f 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c +++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c @@ -23,6 +23,8 @@ #include "sim_gk20a.h" #include "gr_ctx_gk20a.h" +#include + int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) { int err = 0; @@ -239,8 +241,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr) gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib"); return 0; fail: - gk20a_err(dev_from_gk20a(g), - "failed querying grctx info from chiplib"); + nvgpu_err(g, "failed querying grctx info from chiplib"); return err; } diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index af02491e..06374fb7 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "gk20a.h" #include "kind_gk20a.h" @@ -126,81 +127,81 @@ void gk20a_fecs_dump_falcon_stats(struct gk20a *g) { unsigned int i; - gk20a_err(dev_from_gk20a(g), "gr_fecs_os_r : %d", + nvgpu_err(g, "gr_fecs_os_r : %d", gk20a_readl(g, gr_fecs_os_r())); - gk20a_err(dev_from_gk20a(g), "gr_fecs_cpuctl_r : 0x%x", + nvgpu_err(g, "gr_fecs_cpuctl_r : 0x%x", gk20a_readl(g, gr_fecs_cpuctl_r())); - gk20a_err(dev_from_gk20a(g), "gr_fecs_idlestate_r : 0x%x", + nvgpu_err(g, "gr_fecs_idlestate_r : 0x%x", gk20a_readl(g, gr_fecs_idlestate_r())); - gk20a_err(dev_from_gk20a(g), "gr_fecs_mailbox0_r : 0x%x", + nvgpu_err(g, "gr_fecs_mailbox0_r : 0x%x", gk20a_readl(g, gr_fecs_mailbox0_r())); - gk20a_err(dev_from_gk20a(g), "gr_fecs_mailbox1_r : 0x%x", + nvgpu_err(g, "gr_fecs_mailbox1_r : 0x%x", gk20a_readl(g, gr_fecs_mailbox1_r())); - gk20a_err(dev_from_gk20a(g), "gr_fecs_irqstat_r : 0x%x", + nvgpu_err(g, "gr_fecs_irqstat_r : 0x%x", gk20a_readl(g, gr_fecs_irqstat_r())); - gk20a_err(dev_from_gk20a(g), "gr_fecs_irqmode_r : 0x%x", + nvgpu_err(g, "gr_fecs_irqmode_r : 0x%x", gk20a_readl(g, gr_fecs_irqmode_r())); - gk20a_err(dev_from_gk20a(g), "gr_fecs_irqmask_r : 0x%x", + nvgpu_err(g, "gr_fecs_irqmask_r : 0x%x", gk20a_readl(g, gr_fecs_irqmask_r())); - gk20a_err(dev_from_gk20a(g), "gr_fecs_irqdest_r : 0x%x", + nvgpu_err(g, "gr_fecs_irqdest_r : 0x%x", gk20a_readl(g, gr_fecs_irqdest_r())); - gk20a_err(dev_from_gk20a(g), "gr_fecs_debug1_r : 0x%x", + nvgpu_err(g, "gr_fecs_debug1_r : 0x%x", gk20a_readl(g, gr_fecs_debug1_r())); - gk20a_err(dev_from_gk20a(g), "gr_fecs_debuginfo_r : 0x%x", + nvgpu_err(g, "gr_fecs_debuginfo_r : 0x%x", gk20a_readl(g, gr_fecs_debuginfo_r())); for (i = 0; i < gr_fecs_ctxsw_mailbox__size_1_v(); i++) - gk20a_err(dev_from_gk20a(g), "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x", + nvgpu_err(g, "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x", i, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(i))); - gk20a_err(dev_from_gk20a(g), "gr_fecs_engctl_r : 0x%x", + nvgpu_err(g, "gr_fecs_engctl_r : 0x%x", gk20a_readl(g, gr_fecs_engctl_r())); - gk20a_err(dev_from_gk20a(g), "gr_fecs_curctx_r : 0x%x", + nvgpu_err(g, "gr_fecs_curctx_r : 0x%x", gk20a_readl(g, gr_fecs_curctx_r())); - gk20a_err(dev_from_gk20a(g), "gr_fecs_nxtctx_r : 0x%x", + nvgpu_err(g, "gr_fecs_nxtctx_r : 0x%x", gk20a_readl(g, gr_fecs_nxtctx_r())); gk20a_writel(g, gr_fecs_icd_cmd_r(), gr_fecs_icd_cmd_opc_rreg_f() | gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_IMB)); - 
gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_IMB : 0x%x", + nvgpu_err(g, "FECS_FALCON_REG_IMB : 0x%x", gk20a_readl(g, gr_fecs_icd_rdata_r())); gk20a_writel(g, gr_fecs_icd_cmd_r(), gr_fecs_icd_cmd_opc_rreg_f() | gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_DMB)); - gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_DMB : 0x%x", + nvgpu_err(g, "FECS_FALCON_REG_DMB : 0x%x", gk20a_readl(g, gr_fecs_icd_rdata_r())); gk20a_writel(g, gr_fecs_icd_cmd_r(), gr_fecs_icd_cmd_opc_rreg_f() | gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CSW)); - gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_CSW : 0x%x", + nvgpu_err(g, "FECS_FALCON_REG_CSW : 0x%x", gk20a_readl(g, gr_fecs_icd_rdata_r())); gk20a_writel(g, gr_fecs_icd_cmd_r(), gr_fecs_icd_cmd_opc_rreg_f() | gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CTX)); - gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_CTX : 0x%x", + nvgpu_err(g, "FECS_FALCON_REG_CTX : 0x%x", gk20a_readl(g, gr_fecs_icd_rdata_r())); gk20a_writel(g, gr_fecs_icd_cmd_r(), gr_fecs_icd_cmd_opc_rreg_f() | gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_EXCI)); - gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_EXCI : 0x%x", + nvgpu_err(g, "FECS_FALCON_REG_EXCI : 0x%x", gk20a_readl(g, gr_fecs_icd_rdata_r())); for (i = 0; i < 4; i++) { gk20a_writel(g, gr_fecs_icd_cmd_r(), gr_fecs_icd_cmd_opc_rreg_f() | gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_PC)); - gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_PC : 0x%x", + nvgpu_err(g, "FECS_FALCON_REG_PC : 0x%x", gk20a_readl(g, gr_fecs_icd_rdata_r())); gk20a_writel(g, gr_fecs_icd_cmd_r(), gr_fecs_icd_cmd_opc_rreg_f() | gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_SP)); - gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_SP : 0x%x", + nvgpu_err(g, "FECS_FALCON_REG_SP : 0x%x", gk20a_readl(g, gr_fecs_icd_rdata_r())); } } @@ -373,7 +374,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms, } while (!nvgpu_timeout_expired(&timeout)); - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "timeout, ctxsw busy : %d, gr busy : %d", ctxsw_active, gr_busy); @@ -408,7 +409,7 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms, delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); } while (!nvgpu_timeout_expired(&timeout)); - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "timeout, fe busy : %x", val); return -EAGAIN; @@ -466,7 +467,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id, /* do no success check */ break; default: - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid success opcode 0x%x", opc_success); check = WAIT_UCODE_ERROR; @@ -498,7 +499,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id, /* do no check on fail*/ break; default: - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid fail opcode 0x%x", opc_fail); check = WAIT_UCODE_ERROR; break; @@ -512,13 +513,13 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id, } if (check == WAIT_UCODE_TIMEOUT) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "timeout waiting on ucode response"); gk20a_fecs_dump_falcon_stats(g); gk20a_gr_debug_dump(g->dev); return -1; } else if (check == WAIT_UCODE_ERROR) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "ucode method failed on mailbox=%d value=0x%08x", mailbox_id, reg); gk20a_fecs_dump_falcon_stats(g); @@ -735,7 +736,7 @@ static int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g, .cond.ok = GR_IS_UCODE_OP_AND, .cond.fail = GR_IS_UCODE_OP_AND}, true); if (ret) - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "bind channel instance failed"); return ret; @@ -786,13 +787,13 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c) ret = 
gk20a_disable_channel_tsg(g, c); if (ret) { - gk20a_err(dev_from_gk20a(g), "failed to disable channel/TSG\n"); + nvgpu_err(g, "failed to disable channel/TSG\n"); goto clean_up; } ret = gk20a_fifo_preempt(g, c); if (ret) { gk20a_enable_channel_tsg(g, c); - gk20a_err(dev_from_gk20a(g), "failed to preempt channel/TSG\n"); + nvgpu_err(g, "failed to preempt channel/TSG\n"); goto clean_up; } @@ -1493,7 +1494,7 @@ static int gr_gk20a_fecs_ctx_image_save(struct channel_gk20a *c, u32 save_type) }, true); if (ret) - gk20a_err(dev_from_gk20a(g), "save context image failed"); + nvgpu_err(g, "save context image failed"); return ret; } @@ -1821,7 +1822,7 @@ restore_fe_go_idle: clean_up: if (err) - gk20a_err(dev_from_gk20a(g), "fail"); + nvgpu_err(g, "fail"); else gk20a_dbg_fn("done"); @@ -1844,7 +1845,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g, gk20a_dbg_fn(""); if (!ch_ctx->gr_ctx) { - gk20a_err(dev_from_gk20a(g), "no graphics context allocated"); + nvgpu_err(g, "no graphics context allocated"); return -EFAULT; } @@ -1852,13 +1853,13 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g, ret = gk20a_disable_channel_tsg(g, c); if (ret) { - gk20a_err(dev_from_gk20a(g), "failed to disable channel/TSG\n"); + nvgpu_err(g, "failed to disable channel/TSG\n"); goto out; } ret = gk20a_fifo_preempt(g, c); if (ret) { gk20a_enable_channel_tsg(g, c); - gk20a_err(dev_from_gk20a(g), "failed to preempt channel/TSG\n"); + nvgpu_err(g, "failed to preempt channel/TSG\n"); goto out; } @@ -1904,7 +1905,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g, gk20a_dbg_fn(""); if (!ch_ctx->gr_ctx) { - gk20a_err(dev_from_gk20a(g), "no graphics context allocated"); + nvgpu_err(g, "no graphics context allocated"); return -EFAULT; } @@ -1920,14 +1921,14 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g, ret = gk20a_disable_channel_tsg(g, c); if (ret) { - gk20a_err(dev_from_gk20a(g), "failed to disable channel/TSG\n"); + nvgpu_err(g, "failed to disable channel/TSG\n"); return ret; } ret = gk20a_fifo_preempt(g, c); if (ret) { gk20a_enable_channel_tsg(g, c); - gk20a_err(dev_from_gk20a(g), "failed to preempt channel/TSG\n"); + nvgpu_err(g, "failed to preempt channel/TSG\n"); return ret; } @@ -1944,7 +1945,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g, &pm_ctx->mem); if (ret) { c->g->ops.fifo.enable_channel(c); - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to allocate pm ctxt buffer"); return ret; } @@ -1956,7 +1957,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g, gk20a_mem_flag_none, true, pm_ctx->mem.aperture); if (!pm_ctx->mem.gpu_va) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to map pm ctxt buffer"); nvgpu_dma_free(g, &pm_ctx->mem); c->g->ops.fifo.enable_channel(c); @@ -2152,7 +2153,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g, */ if (ch_ctx->pm_ctx.pm_mode == ctxsw_prog_main_image_pm_mode_ctxsw_f()) { if (ch_ctx->pm_ctx.mem.gpu_va == 0) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "context switched pm with no pm buffer!"); nvgpu_mem_end(g, mem); return -EFAULT; @@ -2201,7 +2202,6 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g) { struct mm_gk20a *mm = &g->mm; struct vm_gk20a *vm = &mm->pmu.vm; - struct device *d = dev_from_gk20a(g); struct gk20a_ctxsw_ucode_info *ucode_info = &g->ctxsw_ucode_info; int err; @@ -2220,7 +2220,7 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g) false, ucode_info->surface_desc.aperture); if (!ucode_info->surface_desc.gpu_va) { - gk20a_err(d, "failed to update gmmu ptes\n"); + nvgpu_err(g, 
"failed to update gmmu ptes\n"); return -ENOMEM; } @@ -2274,7 +2274,6 @@ static int gr_gk20a_copy_ctxsw_ucode_segments( int gr_gk20a_init_ctxsw_ucode(struct gk20a *g) { - struct device *d = dev_from_gk20a(g); struct mm_gk20a *mm = &g->mm; struct vm_gk20a *vm = &mm->pmu.vm; struct gk20a_ctxsw_bootloader_desc *fecs_boot_desc; @@ -2289,7 +2288,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g) fecs_fw = nvgpu_request_firmware(g, GK20A_FECS_UCODE_IMAGE, 0); if (!fecs_fw) { - gk20a_err(d, "failed to load fecs ucode!!"); + nvgpu_err(g, "failed to load fecs ucode!!"); return -ENOENT; } @@ -2300,7 +2299,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g) gpccs_fw = nvgpu_request_firmware(g, GK20A_GPCCS_UCODE_IMAGE, 0); if (!gpccs_fw) { release_firmware(fecs_fw); - gk20a_err(d, "failed to load gpccs ucode!!"); + nvgpu_err(g, "failed to load gpccs ucode!!"); return -ENOENT; } @@ -2373,7 +2372,7 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g) retries--; } if (!retries) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "arbiter idle timeout, status: %08x", gk20a_readl(g, gr_fecs_ctxsw_status_1_r())); } @@ -2405,7 +2404,7 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g) val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r()); } if (!retries) - gk20a_err(dev_from_gk20a(g), "arbiter complete timeout"); + nvgpu_err(g, "arbiter complete timeout"); gk20a_writel(g, gr_fecs_current_ctx_r(), gr_fecs_current_ctx_ptr_f(inst_ptr >> 12) | @@ -2422,7 +2421,7 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g) val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r()); } if (!retries) - gk20a_err(dev_from_gk20a(g), "arbiter complete timeout"); + nvgpu_err(g, "arbiter complete timeout"); } void gr_gk20a_load_ctxsw_ucode_header(struct gk20a *g, u64 addr_base, @@ -2499,7 +2498,7 @@ void gr_gk20a_load_ctxsw_ucode_header(struct gk20a *g, u64 addr_base, gk20a_writel(g, reg_offset + gr_fecs_dmemd_r(0), 0); break; default: - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "unknown falcon ucode boot signature 0x%08x" " with reg_offset 0x%08x", segments->boot_signature, reg_offset); @@ -2631,7 +2630,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g) eUcodeHandshakeInitComplete, GR_IS_UCODE_OP_SKIP, 0, false); if (ret) { - gk20a_err(dev_from_gk20a(g), "falcon ucode init timeout"); + nvgpu_err(g, "falcon ucode init timeout"); return ret; } @@ -2666,7 +2665,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g) op.mailbox.ret = &g->gr.ctx_vars.golden_image_size; ret = gr_gk20a_submit_fecs_method_op(g, op, false); if (ret) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "query golden image size failed"); return ret; } @@ -2675,7 +2674,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g) op.mailbox.ret = &g->gr.ctx_vars.zcull_ctxsw_image_size; ret = gr_gk20a_submit_fecs_method_op(g, op, false); if (ret) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "query zcull ctx image size failed"); return ret; } @@ -2684,7 +2683,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g) op.mailbox.ret = &g->gr.ctx_vars.pm_ctxsw_image_size; ret = gr_gk20a_submit_fecs_method_op(g, op, false); if (ret) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "query pm ctx image size failed"); return ret; } @@ -2815,7 +2814,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g) return 0; clean_up: - gk20a_err(dev_from_gk20a(g), "fail"); + nvgpu_err(g, "fail"); gr_gk20a_free_global_ctx_buffers(g); return -ENOMEM; } @@ -2988,7 +2987,7 @@ static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g, int err; if (!tsg->vm) { - gk20a_err(dev_from_gk20a(tsg->g), 
"No address space bound\n"); + nvgpu_err(tsg->g, "No address space bound\n"); return -ENOMEM; } @@ -3029,7 +3028,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g, void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg) { if (!tsg->vm) { - gk20a_err(dev_from_gk20a(tsg->g), "No address space bound\n"); + nvgpu_err(tsg->g, "No address space bound\n"); return; } tsg->g->ops.gr.free_gr_ctx(tsg->g, tsg->vm, tsg->tsg_gr_ctx); @@ -3139,14 +3138,14 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, /* an address space needs to have been bound at this point.*/ if (!gk20a_channel_as_bound(c) && !c->vm) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "not bound to address space at time" " of grctx allocation"); return -EINVAL; } if (!g->ops.gr.is_valid_class(g, args->class_num)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid obj class 0x%x", args->class_num); err = -EINVAL; goto out; @@ -3163,7 +3162,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, args->class_num, args->flags); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to allocate gr ctx buffer"); goto out; } @@ -3171,7 +3170,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, /*TBD: needs to be more subtle about which is * being allocated as some are allowed to be * allocated along same channel */ - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "too many classes alloc'd on same channel"); err = -EINVAL; goto out; @@ -3184,7 +3183,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, args->class_num, args->flags); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to allocate TSG gr ctx buffer"); gk20a_vm_put(tsg->vm); tsg->vm = NULL; @@ -3200,7 +3199,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, /* commit gr ctx buffer */ err = g->ops.gr.commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to commit gr ctx buffer"); goto out; } @@ -3209,7 +3208,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, if (ch_ctx->patch_ctx.mem.sgt == NULL) { err = gr_gk20a_alloc_channel_patch_ctx(g, c); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to allocate patch buffer"); goto out; } @@ -3219,7 +3218,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, if (!ch_ctx->global_ctx_buffer_mapped) { err = gr_gk20a_map_global_ctx_buffers(g, c); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to map global ctx buffer"); goto out; } @@ -3237,7 +3236,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, if (support_gk20a_pmu(g->dev)) { err = gk20a_pmu_disable_elpg(g); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to set disable elpg"); } } @@ -3278,7 +3277,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, lockboost, true); gr_gk20a_ctx_patch_write_end(g, ch_ctx); } else { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to set texlock for compute class"); } @@ -3291,7 +3290,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, /* init golden image, ELPG enabled after this is done */ err = gr_gk20a_init_golden_ctx_image(g, c); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to init golden ctx image"); goto out; } @@ -3301,14 +3300,14 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, err = gr_gk20a_elpg_protected_call(g, gr_gk20a_load_golden_ctx_image(g, c)); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to load golden ctx image"); goto out; } if (g->ops.fecs_trace.bind_channel && !c->vpr) { err = g->ops.fecs_trace.bind_channel(g, c); if (err) { - gk20a_warn(dev_from_gk20a(g), + 
nvgpu_warn(g, "fail to bind channel for ctxsw trace"); } } @@ -3322,7 +3321,7 @@ out: can be reused so no need to release them. 2. golden image init and load is a one time thing so if they pass, no need to undo. */ - gk20a_err(dev_from_gk20a(g), "fail"); + nvgpu_err(g, "fail"); return err; } @@ -3490,7 +3489,7 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr) gr->max_zcull_per_gpc_count = nvgpu_get_litter_value(g, GPU_LIT_NUM_ZCULL_BANKS); if (!gr->gpc_count) { - gk20a_err(dev_from_gk20a(g), "gpc_count==0!"); + nvgpu_err(g, "gpc_count==0!"); goto clean_up; } @@ -3846,7 +3845,7 @@ clean_up: nvgpu_kfree(g, sorted_to_unsorted_gpc_map); if (ret) - gk20a_err(dev_from_gk20a(g), "fail"); + nvgpu_err(g, "fail"); else gk20a_dbg_fn("done"); @@ -3936,7 +3935,7 @@ static void gr_gk20a_detect_sm_arch(struct gk20a *g) if (raw_version == gr_gpc0_tpc0_sm_arch_spa_version_smkepler_lp_v()) version = 0x320; /* SM 3.2 */ else - gk20a_err(dev_from_gk20a(g), "Unknown SM version 0x%x\n", + nvgpu_err(g, "Unknown SM version 0x%x\n", raw_version); /* on Kepler, SM version == SPA version */ @@ -4030,7 +4029,7 @@ void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) ret = gk20a_fifo_disable_engine_activity(g, gr_info, true); if (ret) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to disable gr engine activity"); return; } @@ -4038,7 +4037,7 @@ void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), GR_IDLE_CHECK_DEFAULT); if (ret) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to idle graphics"); goto clean_up; } @@ -4049,7 +4048,7 @@ void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) clean_up: ret = gk20a_fifo_enable_engine_activity(g, gr_info); if (ret) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to enable gr engine activity\n"); } } @@ -4080,7 +4079,7 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr, if (memcmp(c_tbl->color_l2, zbc_val->color_l2, sizeof(zbc_val->color_l2))) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "zbc l2 and ds color don't match with existing entries"); ret = -EINVAL; goto err_mutex; @@ -4140,14 +4139,14 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr, if (g->ops.gr.add_zbc_type_s) { added = g->ops.gr.add_zbc_type_s(g, gr, zbc_val, &ret); } else { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid zbc table type %d", zbc_val->type); ret = -EINVAL; goto err_mutex; } break; default: - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid zbc table type %d", zbc_val->type); ret = -EINVAL; goto err_mutex; @@ -4179,7 +4178,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr, break; case GK20A_ZBC_TYPE_COLOR: if (index >= GK20A_ZBC_TABLE_SIZE) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid zbc color table index\n"); return -EINVAL; } @@ -4194,7 +4193,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr, break; case GK20A_ZBC_TYPE_DEPTH: if (index >= GK20A_ZBC_TABLE_SIZE) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid zbc depth table index\n"); return -EINVAL; } @@ -4207,13 +4206,13 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr, return g->ops.gr.zbc_s_query_table(g, gr, query_params); } else { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid zbc table type\n"); return -EINVAL; } break; default: - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid zbc table type\n"); return -EINVAL; } @@ -4303,7 +4302,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a 
*gr) if (!err) gr->max_default_color_index = 3; else { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to load default zbc color table\n"); return err; } @@ -4322,7 +4321,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr) if (!err) gr->max_default_depth_index = 2; else { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to load default zbc depth table\n"); return err; } @@ -4349,7 +4348,7 @@ int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, ret = gk20a_fifo_disable_engine_activity(g, gr_info, true); if (ret) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to disable gr engine activity"); return ret; } @@ -4357,7 +4356,7 @@ int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), GR_IDLE_CHECK_DEFAULT); if (ret) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to idle graphics"); goto clean_up; } @@ -4366,7 +4365,7 @@ int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, clean_up: if (gk20a_fifo_enable_engine_activity(g, gr_info)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to enable gr engine activity"); } @@ -4400,7 +4399,7 @@ void gr_gk20a_init_blcg_mode(struct gk20a *g, u32 mode, u32 engine) therm_gate_ctrl_blk_clk_auto_f()); break; default: - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid blcg mode %d", mode); return; } @@ -4435,7 +4434,7 @@ void gr_gk20a_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine) therm_gate_ctrl_eng_clk_auto_f()); break; default: - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid elcg mode %d", mode); } @@ -4462,7 +4461,7 @@ void gr_gk20a_init_cg_mode(struct gk20a *g, u32 cgmode, u32 mode_config) g->ops.gr.init_elcg_mode(g, mode_config, active_engine_id); else - gk20a_err(dev_from_gk20a(g), "invalid cg mode %d %d", cgmode, mode_config); + nvgpu_err(g, "invalid cg mode %d %d", cgmode, mode_config); } } @@ -4592,7 +4591,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr) zcull_map_tiles = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32)); if (!zcull_map_tiles) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to allocate zcull map titles"); return -ENOMEM; } @@ -4600,7 +4599,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr) zcull_bank_counters = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32)); if (!zcull_bank_counters) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to allocate zcull bank counters"); nvgpu_kfree(g, zcull_map_tiles); return -ENOMEM; @@ -4626,7 +4625,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr) if (gpc_zcull_count != gr->max_zcull_per_gpc_count && gpc_zcull_count < gpc_tpc_count) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "zcull_banks (%d) less than tpcs (%d) for gpc (%d)", gpc_zcull_count, gpc_tpc_count, gpc_index); return -EINVAL; @@ -4991,7 +4990,7 @@ static int gk20a_init_gr_prepare(struct gk20a *g) if (!g->gr.ctx_vars.valid) { err = gr_gk20a_init_ctx_vars(g, &g->gr); if (err) - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to load gr init ctx"); } return err; @@ -5024,7 +5023,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g) udelay(CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT); } while (!nvgpu_timeout_expired(&timeout)); - gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout"); + nvgpu_err(g, "Falcon mem scrubbing timeout"); return -ETIMEDOUT; } @@ -5042,7 +5041,7 @@ static int gr_gk20a_init_ctxsw(struct gk20a *g) out: if (err) - gk20a_err(dev_from_gk20a(g), 
"fail"); + nvgpu_err(g, "fail"); else gk20a_dbg_fn("done"); @@ -5076,7 +5075,7 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g) out: if (err) - gk20a_err(dev_from_gk20a(g), "fail"); + nvgpu_err(g, "fail"); else gk20a_dbg_fn("done"); @@ -5094,7 +5093,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g) unsigned int num_entries = 0; if (nvgpu_mem_begin(g, mem)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "failed to map priv access map memory"); return -ENOMEM; } @@ -5188,7 +5187,7 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g) return 0; clean_up: - gk20a_err(dev_from_gk20a(g), "fail"); + nvgpu_err(g, "fail"); gk20a_remove_gr_support(gr); return err; } @@ -5198,7 +5197,6 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g) struct pmu_gk20a *pmu = &g->pmu; struct mm_gk20a *mm = &g->mm; struct vm_gk20a *vm = &mm->pmu.vm; - struct device *d = dev_from_gk20a(g); int err = 0; u32 size; @@ -5209,7 +5207,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g) err = gr_gk20a_fecs_get_reglist_img_size(g, &size); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to query fecs pg buffer size"); return err; } @@ -5217,7 +5215,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g) if (!pmu->pg_buf.cpu_va) { err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf); if (err) { - gk20a_err(d, "failed to allocate memory\n"); + nvgpu_err(g, "failed to allocate memory\n"); return -ENOMEM; } } @@ -5225,14 +5223,14 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g) err = gr_gk20a_fecs_set_reglist_bind_inst(g, &mm->pmu.inst_block); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to bind pmu inst to gr"); return err; } err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.gpu_va); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to set pg buffer pmu va"); return err; } @@ -5496,21 +5494,21 @@ int gk20a_gr_reset(struct gk20a *g) size = 0; err = gr_gk20a_fecs_get_reglist_img_size(g, &size); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to query fecs pg buffer size"); return err; } err = gr_gk20a_fecs_set_reglist_bind_inst(g, &g->mm.pmu.inst_block); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to bind pmu inst to gr"); return err; } err = gr_gk20a_fecs_set_reglist_virtual_addr(g, g->pmu.pg_buf.gpu_va); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to set pg buffer pmu va"); return err; } @@ -5593,7 +5591,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g, gk20a_dbg_fn(""); gk20a_gr_set_error_notifier(g, isr_data, NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT); - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "gr semaphore timeout\n"); return -EINVAL; } @@ -5605,7 +5603,7 @@ static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g, gk20a_gr_set_error_notifier(g, isr_data, NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY); /* This is an unrecoverable error, reset is needed */ - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "gr semaphore timeout\n"); return -EINVAL; } @@ -5619,7 +5617,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g, if (ret) { gk20a_gr_set_error_notifier(g, isr_data, NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY); - gk20a_err(dev_from_gk20a(g), "invalid method class 0x%08x" + nvgpu_err(g, "invalid method class 0x%08x" ", offset 0x%08x address 0x%08x\n", isr_data->class_num, isr_data->offset, isr_data->addr); } @@ -5632,7 +5630,7 @@ static int gk20a_gr_handle_illegal_class(struct gk20a *g, gk20a_dbg_fn(""); gk20a_gr_set_error_notifier(g, isr_data, 
NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY); - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid class 0x%08x, offset 0x%08x", isr_data->class_num, isr_data->offset); return -EINVAL; @@ -5649,14 +5647,14 @@ int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch, if (!gr_fecs_intr) return 0; - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "unhandled fecs error interrupt 0x%08x for channel %u", gr_fecs_intr, isr_data->chid); if (gr_fecs_intr & gr_fecs_host_int_status_umimp_firmware_method_f(1)) { gk20a_gr_set_error_notifier(g, isr_data, NVGPU_CHANNEL_FECS_ERR_UNIMP_FIRMWARE_METHOD); - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "firmware method error 0x%08x for offset 0x%04x", gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(6)), isr_data->data_lo); @@ -5678,7 +5676,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g, gr_class_error_code_v(gk20a_readl(g, gr_class_error_r())); gk20a_gr_set_error_notifier(g, isr_data, NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY); - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "class error 0x%08x, offset 0x%08x," " unhandled intr 0x%08x for channel %u\n", isr_data->class_num, isr_data->offset, @@ -5694,7 +5692,7 @@ static int gk20a_gr_handle_firmware_method(struct gk20a *g, gk20a_gr_set_error_notifier(g, isr_data, NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY); - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "firmware method 0x%08x, offset 0x%08x for channel %u\n", isr_data->class_num, isr_data->offset, isr_data->chid); @@ -5772,7 +5770,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g, /* validate offset */ if (offset + sizeof(struct share_buffer_head) > buffer_size || offset + sizeof(struct share_buffer_head) < offset) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "cyclestats buffer overrun at offset 0x%x\n", offset); break; @@ -5790,7 +5788,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g, if (sh_hdr->size < min_element_size || offset + sh_hdr->size > buffer_size || offset + sh_hdr->size < offset) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "bad cyclestate buffer header size at offset 0x%x\n", offset); sh_hdr->failed = true; @@ -5814,7 +5812,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g, u64 v; if (!valid) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "invalid cycletstats op offset: 0x%x\n", op_elem->offset_bar0); @@ -6070,7 +6068,7 @@ static int gk20a_gr_update_sm_error_state(struct gk20a *g, err = gr_gk20a_disable_ctxsw(g); if (err) { - gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw\n"); + nvgpu_err(g, "unable to stop gr ctxsw\n"); goto fail; } @@ -6130,7 +6128,7 @@ static int gk20a_gr_clear_sm_error_state(struct gk20a *g, err = gr_gk20a_disable_ctxsw(g); if (err) { - gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw\n"); + nvgpu_err(g, "unable to stop gr ctxsw\n"); goto fail; } @@ -6183,7 +6181,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, warp_esr = g->ops.gr.mask_hww_warp_esr(warp_esr); if (!sm_debugger_attached) { - gk20a_err(dev_from_gk20a(g), "sm hww global %08x warp %08x\n", + nvgpu_err(g, "sm hww global %08x warp %08x\n", global_esr, warp_esr); return -EFAULT; } @@ -6203,7 +6201,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, &early_exit, &ignore_debugger); if (ret) { - gk20a_err(dev_from_gk20a(g), "could not pre-process sm error!\n"); + nvgpu_err(g, "could not pre-process sm error!\n"); return ret; } } @@ -6237,7 +6235,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, if (do_warp_sync) { ret = gk20a_gr_lock_down_sm(g, gpc, tpc, 
global_mask, true); if (ret) { - gk20a_err(dev_from_gk20a(g), "sm did not lock down!\n"); + nvgpu_err(g, "sm did not lock down!\n"); return ret; } } @@ -6389,7 +6387,6 @@ static int gk20a_gr_post_bpt_events(struct gk20a *g, struct channel_gk20a *ch, int gk20a_gr_isr(struct gk20a *g) { - struct device *dev = dev_from_gk20a(g); struct gr_gk20a_isr_data isr_data; u32 grfifo_ctl; u32 obj_table; @@ -6520,14 +6517,14 @@ int gk20a_gr_isr(struct gk20a *g) if (exception & gr_exception_fe_m()) { u32 fe = gk20a_readl(g, gr_fe_hww_esr_r()); - gk20a_err(dev, "fe warning %08x", fe); + nvgpu_err(g, "fe warning %08x", fe); gk20a_writel(g, gr_fe_hww_esr_r(), fe); need_reset |= -EFAULT; } if (exception & gr_exception_memfmt_m()) { u32 memfmt = gk20a_readl(g, gr_memfmt_hww_esr_r()); - gk20a_err(dev, "memfmt exception %08x", memfmt); + nvgpu_err(g, "memfmt exception %08x", memfmt); gk20a_writel(g, gr_memfmt_hww_esr_r(), memfmt); need_reset |= -EFAULT; } @@ -6556,7 +6553,7 @@ int gk20a_gr_isr(struct gk20a *g) if (exception & gr_exception_ds_m()) { u32 ds = gk20a_readl(g, gr_ds_hww_esr_r()); - gk20a_err(dev, "ds exception %08x", ds); + nvgpu_err(g, "ds exception %08x", ds); gk20a_writel(g, gr_ds_hww_esr_r(), ds); need_reset |= -EFAULT; } @@ -6565,7 +6562,7 @@ int gk20a_gr_isr(struct gk20a *g) gr_intr &= ~gr_intr_exception_pending_f(); if (need_reset) { - gk20a_err(dev, "set gr exception notifier"); + nvgpu_err(g, "set gr exception notifier"); gk20a_gr_set_error_notifier(g, &isr_data, NVGPU_CHANNEL_GR_EXCEPTION); } @@ -6586,7 +6583,7 @@ int gk20a_gr_isr(struct gk20a *g) if (gr_intr && !ch) { /* Clear interrupts for unused channel. This is probably an interrupt during gk20a_free_channel() */ - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "unhandled gr interrupt 0x%08x for unreferenceable channel, clearing", gr_intr); gk20a_writel(g, gr_intr_r(), gr_intr); @@ -6598,7 +6595,7 @@ int gk20a_gr_isr(struct gk20a *g) gr_gpfifo_ctl_semaphore_access_f(1)); if (gr_intr) - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "unhandled gr interrupt 0x%08x", gr_intr); /* Posting of BPT events should be the last thing in this function */ @@ -7330,13 +7327,13 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, context = (u8 *)context_buffer; /* sanity check main header */ if (!check_main_image_header_magic(context)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "Invalid main header: magic value"); return -EINVAL; } num_gpcs = *(u32 *)(context + ctxsw_prog_main_image_num_gpcs_o()); if (gpc_num >= num_gpcs) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "GPC 0x%08x is greater than total count 0x%08x!\n", gpc_num, num_gpcs); return -EINVAL; @@ -7357,7 +7354,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, /* check local header magic */ context += ctxsw_prog_ucode_header_size_in_bytes(); if (!check_local_header_magic(context)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "Invalid local header: magic value\n"); return -EINVAL; } @@ -7388,7 +7385,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, (sm_dsm_perf_regs[sm_dsm_perf_reg_id] & tpc_gpc_mask); if (chk_addr != addr) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "Oops addr miss-match! : 0x%08x != 0x%08x\n", addr, chk_addr); return -EINVAL; @@ -7419,7 +7416,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, tpc_gpc_mask); if (chk_addr != addr) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "Oops addr miss-match! 
: 0x%08x != 0x%08x\n", addr, chk_addr); return -EINVAL; @@ -7488,7 +7485,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, /* last sanity check: did we somehow compute an offset outside the * extended buffer? */ if (offset_to_segment > offset_to_segment_end) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "Overflow ctxsw buffer! 0x%08x > 0x%08x\n", offset_to_segment, offset_to_segment_end); return -EINVAL; @@ -7680,7 +7677,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, context = (u8 *)context_buffer; if (!check_main_image_header_magic(context)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "Invalid main header: magic value"); return -EINVAL; } @@ -7689,7 +7686,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, /* Parse the FECS local header. */ context += ctxsw_prog_ucode_header_size_in_bytes(); if (!check_local_header_magic(context)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "Invalid FECS local header: magic value\n"); return -EINVAL; } @@ -7724,7 +7721,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, } if ((gpc_num + 1) > num_gpcs) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "GPC %d not in this context buffer.\n", gpc_num); return -EINVAL; @@ -7734,7 +7731,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, for (i = 0; i < num_gpcs; i++) { context += ctxsw_prog_ucode_header_size_in_bytes(); if (!check_local_header_magic(context)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "Invalid GPCCS local header: magic value\n"); return -EINVAL; @@ -7751,7 +7748,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, num_tpcs = *(u32 *)(context + ctxsw_prog_local_image_num_tpcs_o()); if ((i == gpc_num) && ((tpc_num + 1) > num_tpcs)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "GPC %d TPC %d not in this context buffer.\n", gpc_num, tpc_num); return -EINVAL; @@ -8159,7 +8156,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g) goto cleanup; if (offset > hwpm_ctxsw_buffer_size) { - gk20a_err(dev_from_gk20a(g), "offset > buffer size"); + nvgpu_err(g, "offset > buffer size"); goto cleanup; } @@ -8175,7 +8172,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g) return 0; cleanup: - gk20a_err(dev_from_gk20a(g), "Failed to create HWPM buffer offset map"); + nvgpu_err(g, "Failed to create HWPM buffer offset map"); nvgpu_big_free(g, map); return -EINVAL; } @@ -8213,7 +8210,7 @@ static int gr_gk20a_find_priv_offset_in_pm_buffer(struct gk20a *g, if (result) *priv_offset = result->offset; else { - gk20a_err(dev_from_gk20a(g), "Lookup failed for address 0x%x", addr); + nvgpu_err(g, "Lookup failed for address 0x%x", addr); err = -EINVAL; } return err; @@ -8278,7 +8275,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, */ err = gr_gk20a_disable_ctxsw(g); if (err) { - gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw"); + nvgpu_err(g, "unable to stop gr ctxsw"); /* this should probably be ctx-fatal... 
*/ goto cleanup; } @@ -8418,7 +8415,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, if (!pm_ctx_ready) { /* Make sure ctx buffer was initialized */ if (!ch_ctx->pm_ctx.mem.pages) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "Invalid ctx buffer"); err = -EINVAL; goto cleanup; @@ -8515,7 +8512,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, if (restart_gr_ctxsw) { int tmp_err = gr_gk20a_enable_ctxsw(g); if (tmp_err) { - gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n"); + nvgpu_err(g, "unable to restart ctxsw!\n"); err = tmp_err; } } @@ -8659,7 +8656,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, * enabled, the sm will never lock down. */ if (!mmu_debug_mode_enabled && (g->ops.mm.mmu_fault_pending(g))) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "GPC%d TPC%d: mmu fault pending," " sm will never lock down!", gpc, tpc); return -EFAULT; @@ -8684,9 +8681,9 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, warps_trapped = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_1_r() + offset) << 32; warps_trapped |= gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_r() + offset); - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "GPC%d TPC%d: timed out while trying to lock down SM", gpc, tpc); - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "STATUS0(0x%x)=0x%x CONTROL0=0x%x VALID_MASK=0x%llx PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n", gr_gpc0_tpc0_sm_dbgr_status0_r() + offset, dbgr_status0, dbgr_control0, warps_valid, warps_paused, warps_trapped); @@ -8707,7 +8704,7 @@ void gk20a_suspend_single_sm(struct gk20a *g, /* if an SM debugger isn't attached, skip suspend */ if (!gk20a_gr_sm_debugger_attached(g)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "SM debugger not attached, skipping suspend!\n"); return; } @@ -8722,7 +8719,7 @@ void gk20a_suspend_single_sm(struct gk20a *g, err = gk20a_gr_wait_for_sm_lock_down(g, gpc, tpc, global_esr_mask, check_errors); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "SuspendSm failed\n"); return; } @@ -8738,7 +8735,7 @@ void gk20a_suspend_all_sms(struct gk20a *g, /* if an SM debugger isn't attached, skip suspend */ if (!gk20a_gr_sm_debugger_attached(g)) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "SM debugger not attached, skipping suspend!\n"); return; } @@ -8759,7 +8756,7 @@ void gk20a_suspend_all_sms(struct gk20a *g, gk20a_gr_wait_for_sm_lock_down(g, gpc, tpc, global_esr_mask, check_errors); if (err) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "SuspendAllSms failed\n"); return; } @@ -9068,7 +9065,7 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g, err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0); if (err) - gk20a_err(dev_from_gk20a(g), "Failed to access register\n"); + nvgpu_err(g, "Failed to access register\n"); nvgpu_kfree(g, ops); return err; } @@ -9188,7 +9185,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g, err = gr_gk20a_disable_ctxsw(g); if (err) { - gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw"); + nvgpu_err(g, "unable to stop gr ctxsw"); goto clean_up; } @@ -9206,7 +9203,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g, err = gr_gk20a_enable_ctxsw(g); if (err) - gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n"); + nvgpu_err(g, "unable to restart ctxsw!\n"); *ctx_resident_ch_fd = local_ctx_resident_ch_fd; @@ -9230,7 +9227,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g, err = gr_gk20a_disable_ctxsw(g); if (err) { - gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw"); + nvgpu_err(g, "unable to stop gr ctxsw"); goto clean_up; } @@ 
-9244,7 +9241,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g, err = gr_gk20a_enable_ctxsw(g); if (err) - gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n"); + nvgpu_err(g, "unable to restart ctxsw!\n"); *ctx_resident_ch_fd = local_ctx_resident_ch_fd; @@ -9308,7 +9305,7 @@ int gr_gk20a_inval_icache(struct gk20a *g, struct channel_gk20a *ch) err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 0, 1); if (err) { - gk20a_err(dev_from_gk20a(g), "Failed to read register"); + nvgpu_err(g, "Failed to read register"); return err; } @@ -9318,7 +9315,7 @@ int gr_gk20a_inval_icache(struct gk20a *g, struct channel_gk20a *ch) ops.value_lo = set_field(regval, gr_pri_gpcs_gcc_dbg_invalidate_m(), 1); err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 1, 0); if (err) { - gk20a_err(dev_from_gk20a(g), "Failed to write register"); + nvgpu_err(g, "Failed to write register"); return err; } @@ -9326,7 +9323,7 @@ int gr_gk20a_inval_icache(struct gk20a *g, struct channel_gk20a *ch) ops.offset = gr_pri_gpc0_tpc0_sm_cache_control_r(); err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 0, 1); if (err) { - gk20a_err(dev_from_gk20a(g), "Failed to read register"); + nvgpu_err(g, "Failed to read register"); return err; } @@ -9380,7 +9377,7 @@ int gr_gk20a_wait_for_pause(struct gk20a *g, struct warpstate *w_state) err = gk20a_gr_lock_down_sm(g, gpc, tpc, global_mask, false); if (err) { - gk20a_err(dev_from_gk20a(g), "sm did not lock down!\n"); + nvgpu_err(g, "sm did not lock down!"); return err; } } diff --git a/drivers/gpu/nvgpu/gk20a/hal.c b/drivers/gpu/nvgpu/gk20a/hal.c index dc4fcf1c..bbde885f 100644 --- a/drivers/gpu/nvgpu/gk20a/hal.c +++ b/drivers/gpu/nvgpu/gk20a/hal.c @@ -23,6 +23,8 @@ #include "nvgpu_gpuid_t19x.h" #endif +#include + int gpu_init_hal(struct gk20a *g) { u32 ver = g->gpu_characteristics.arch + g->gpu_characteristics.impl; @@ -54,7 +56,7 @@ int gpu_init_hal(struct gk20a *g) break; #endif default: - gk20a_err(g->dev, "no support for %x", ver); + nvgpu_err(g, "no support for %x", ver); return -ENODEV; } diff --git a/drivers/gpu/nvgpu/gk20a/hal_gk20a.c b/drivers/gpu/nvgpu/gk20a/hal_gk20a.c index 7a13ed9c..00d57022 100644 --- a/drivers/gpu/nvgpu/gk20a/hal_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/hal_gk20a.c @@ -35,6 +35,8 @@ #include "css_gr_gk20a.h" #include "pramin_gk20a.h" +#include + #include static struct gpu_ops gk20a_ops = { @@ -132,7 +134,7 @@ static int gk20a_get_litter_value(struct gk20a *g, int value) ret = 0; break; default: - gk20a_err(dev_from_gk20a(g), "Missing definition %d", value); + nvgpu_err(g, "Missing definition %d", value); BUG(); break; } diff --git a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c index 1b9d515c..f8416d55 100644 --- a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c @@ -20,6 +20,7 @@ #include #include +#include #include "gk20a.h" @@ -160,8 +161,7 @@ static int gk20a_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, } while (!nvgpu_timeout_expired(&timeout)); if (nvgpu_timeout_peek_expired(&timeout)) { - gk20a_err(dev_from_gk20a(g), - "comp tag clear timeout\n"); + nvgpu_err(g, "comp tag clear timeout"); err = -EBUSY; goto out; } @@ -186,7 +186,7 @@ static void gk20a_ltc_isr(struct gk20a *g) u32 intr; intr = gk20a_readl(g, ltc_ltc0_ltss_intr_r()); - gk20a_err(dev_from_gk20a(g), "ltc: %08x\n", intr); + nvgpu_err(g, "ltc: %08x\n", intr); gk20a_writel(g, ltc_ltc0_ltss_intr_r(), intr); } @@ -215,7 +215,7 @@ static int gk20a_determine_L2_size_bytes(struct gk20a *g) ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) { sets = 16; } else { - 
dev_err(dev_from_gk20a(g), + nvgpu_err(g, "Unknown constant %u for active sets", (unsigned)active_sets_value); sets = 0; diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index ab3dc3f9..78332ee7 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "gk20a.h" #include "mm_gk20a.h" @@ -536,7 +537,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g) 0, NULL); if (err) { - gk20a_err(g->dev, + nvgpu_err(g, "Failed to clear vidmem region 1 : %d", err); return err; } @@ -555,7 +556,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g) 0, &gk20a_fence_out); if (err) { - gk20a_err(g->dev, + nvgpu_err(g, "Failed to clear vidmem region 2 : %d", err); return err; } @@ -575,7 +576,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g) gk20a_fence_put(gk20a_fence_out); if (err) { - gk20a_err(g->dev, + nvgpu_err(g, "fence wait failed for CE execute ops"); return err; } @@ -591,7 +592,6 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm) { #if defined(CONFIG_GK20A_VIDMEM) struct gk20a *g = mm->g; - struct device *d = dev_from_gk20a(g); size_t size = g->ops.mm.get_vidmem_size ? g->ops.mm.get_vidmem_size(g) : 0; u64 bootstrap_base, bootstrap_size, base; @@ -625,7 +625,7 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm) default_page_size, GPU_ALLOC_4K_VIDMEM_PAGES); if (err) { - gk20a_err(d, "Failed to register vidmem for size %zu: %d", + nvgpu_err(g, "Failed to register vidmem for size %zu: %d", size, err); return err; } @@ -796,7 +796,7 @@ void gk20a_init_mm_ce_context(struct gk20a *g) NULL); if (g->mm.vidmem.ce_ctx_id == (u32)~0) - gk20a_err(g->dev, + nvgpu_err(g, "Failed to allocate CE context for vidmem page clearing support"); } #endif @@ -882,7 +882,6 @@ static void unmap_gmmu_phys_pages(struct gk20a_mm_entry *entry) static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order, struct gk20a_mm_entry *entry) { - struct device *d = dev_from_vm(vm); struct gk20a *g = gk20a_from_vm(vm); u32 num_pages = 1 << order; u32 len = num_pages * PAGE_SIZE; @@ -905,7 +904,7 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order, if (err) { - gk20a_err(d, "memory allocation failed"); + nvgpu_err(g, "memory allocation failed"); return -ENOMEM; } @@ -1209,7 +1208,7 @@ void gk20a_vm_put_buffers(struct vm_gk20a *vm, static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset, struct vm_gk20a_mapping_batch *batch) { - struct device *d = dev_from_vm(vm); + struct gk20a *g = vm->mm->g; struct mapped_buffer_node *mapped_buffer; nvgpu_mutex_acquire(&vm->update_gmmu_lock); @@ -1217,7 +1216,7 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset, mapped_buffer = find_mapped_buffer_locked(vm->mapped_buffers, offset); if (!mapped_buffer) { nvgpu_mutex_release(&vm->update_gmmu_lock); - gk20a_err(d, "invalid addr to unmap 0x%llx", offset); + nvgpu_err(g, "invalid addr to unmap 0x%llx", offset); return; } @@ -1240,7 +1239,7 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset, if (mapped_buffer->user_mapped == 0) { nvgpu_mutex_release(&vm->update_gmmu_lock); - gk20a_err(d, "addr already unmapped from user 0x%llx", offset); + nvgpu_err(g, "addr already unmapped from user 0x%llx", offset); return; } @@ -1284,7 +1283,7 @@ u64 gk20a_vm_alloc_va(struct vm_gk20a *vm, offset = nvgpu_alloc(vma, size); if (!offset) { - gk20a_err(dev_from_vm(vm), + nvgpu_err(vm->mm->g, "%s oom: sz=0x%llx", vma->name, size); return 0; } @@ -1405,14 +1404,13 @@ static int 
setup_buffer_kind_and_compression(struct vm_gk20a *vm, { bool kind_compressible; struct gk20a *g = gk20a_from_vm(vm); - struct device *d = dev_from_gk20a(g); int ctag_granularity = g->ops.fb.compression_page_size(g); if (unlikely(bfr->kind_v == gmmu_pte_kind_invalid_v())) bfr->kind_v = gmmu_pte_kind_pitch_v(); if (unlikely(!gk20a_kind_is_supported(bfr->kind_v))) { - gk20a_err(d, "kind 0x%x not supported", bfr->kind_v); + nvgpu_err(g, "kind 0x%x not supported", bfr->kind_v); return -EINVAL; } @@ -1423,7 +1421,7 @@ static int setup_buffer_kind_and_compression(struct vm_gk20a *vm, bfr->uc_kind_v = gk20a_get_uncompressed_kind(bfr->kind_v); if (unlikely(bfr->uc_kind_v == gmmu_pte_kind_invalid_v())) { /* shouldn't happen, but it is worth cross-checking */ - gk20a_err(d, "comptag kind 0x%x can't be" + nvgpu_err(g, "comptag kind 0x%x can't be" " downgraded to uncompressed kind", bfr->kind_v); return -EINVAL; @@ -1432,9 +1430,6 @@ static int setup_buffer_kind_and_compression(struct vm_gk20a *vm, /* comptags only supported for suitable kinds, 128KB pagesize */ if (kind_compressible && vm->gmmu_page_sizes[pgsz_idx] < g->ops.fb.compressible_page_size(g)) { - /* - gk20a_warn(d, "comptags specified" - " but pagesize being used doesn't support it");*/ /* it is safe to fall back to uncompressed as functionality is not harmed */ bfr->kind_v = bfr->uc_kind_v; @@ -1453,19 +1448,19 @@ static int validate_fixed_buffer(struct vm_gk20a *vm, u64 map_offset, u64 map_size, struct vm_reserved_va_node **pva_node) { - struct device *dev = dev_from_vm(vm); + struct gk20a *g = vm->mm->g; struct vm_reserved_va_node *va_node; struct mapped_buffer_node *buffer; u64 map_end = map_offset + map_size; /* can wrap around with insane map_size; zero is disallowed too */ if (map_end <= map_offset) { - gk20a_warn(dev, "fixed offset mapping with invalid map_size"); + nvgpu_warn(g, "fixed offset mapping with invalid map_size"); return -EINVAL; } if (map_offset & (vm->gmmu_page_sizes[bfr->pgsz_idx] - 1)) { - gk20a_err(dev, "map offset must be buffer page size aligned 0x%llx", + nvgpu_err(g, "map offset must be buffer page size aligned 0x%llx", map_offset); return -EINVAL; } @@ -1474,13 +1469,13 @@ static int validate_fixed_buffer(struct vm_gk20a *vm, * userspace-managed address spaces */ va_node = addr_to_reservation(vm, map_offset); if (!va_node && !vm->userspace_managed) { - gk20a_warn(dev, "fixed offset mapping without space allocation"); + nvgpu_warn(g, "fixed offset mapping without space allocation"); return -EINVAL; } /* Mapped area should fit inside va, if there's one */ if (va_node && map_end > va_node->vaddr_start + va_node->size) { - gk20a_warn(dev, "fixed offset mapping size overflows va node"); + nvgpu_warn(g, "fixed offset mapping size overflows va node"); return -EINVAL; } @@ -1490,7 +1485,7 @@ static int validate_fixed_buffer(struct vm_gk20a *vm, buffer = find_mapped_buffer_less_than_locked( vm->mapped_buffers, map_offset + map_size); if (buffer && buffer->addr + buffer->size > map_offset) { - gk20a_warn(dev, "overlapping buffer map requested"); + nvgpu_warn(g, "overlapping buffer map requested"); return -EINVAL; } @@ -1517,7 +1512,6 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm, { int err = 0; bool allocated = false; - struct device *d = dev_from_vm(vm); struct gk20a *g = gk20a_from_vm(vm); int ctag_granularity = g->ops.fb.compression_page_size(g); u32 ctag_lines = DIV_ROUND_UP_ULL(size, ctag_granularity); @@ -1527,7 +1521,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm, map_offset = gk20a_vm_alloc_va(vm, size, 
pgsz_idx); if (!map_offset) { - gk20a_err(d, "failed to allocate va space"); + nvgpu_err(g, "failed to allocate va space"); err = -ENOMEM; goto fail_alloc; } @@ -1563,7 +1557,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm, priv, aperture); if (err) { - gk20a_err(d, "failed to update ptes on map"); + nvgpu_err(g, "failed to update ptes on map"); goto fail_validate; } @@ -1577,7 +1571,7 @@ fail_validate: if (allocated) gk20a_vm_free_va(vm, map_offset, size, pgsz_idx); fail_alloc: - gk20a_err(d, "%s: failed with err=%d\n", __func__, err); + nvgpu_err(g, "%s: failed with err=%d\n", __func__, err); return 0; } @@ -1596,8 +1590,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm, if (va_allocated) { err = gk20a_vm_free_va(vm, vaddr, size, pgsz_idx); if (err) { - dev_err(dev_from_vm(vm), - "failed to free va"); + nvgpu_err(g, "failed to free va"); return; } } @@ -1614,8 +1607,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm, sparse, 0, APERTURE_INVALID); /* don't care for unmap */ if (err) - dev_err(dev_from_vm(vm), - "failed to update gmmu ptes on unmap"); + nvgpu_err(g, "failed to update gmmu ptes on unmap"); /* flush l2 so any dirty lines are written out *now*. * also as we could potentially be switching this buffer @@ -1647,7 +1639,7 @@ static enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g, } else if (WARN_ON(buf_owner == g && !g->mm.vidmem_is_vidmem)) { /* Looks like our video memory, but this gpu doesn't support * it. Warn about a bug and bail out */ - gk20a_warn(dev_from_gk20a(g), + nvgpu_warn(g, "dmabuf is our vidmem but we don't have local vidmem"); return APERTURE_INVALID; } else if (buf_owner != g) { @@ -1860,7 +1852,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes) if (!g->mm.vidmem.cleared) { err = gk20a_vidmem_clear_all(g); if (err) { - gk20a_err(g->dev, + nvgpu_err(g, "failed to clear whole vidmem"); goto err_kfree; } @@ -2037,7 +2029,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm, if (user_mapped && vm->userspace_managed && !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) { - gk20a_err(d, + nvgpu_err(g, "%s: non-fixed-offset mapping not available on userspace managed address spaces", __func__); return -EFAULT; @@ -2068,7 +2060,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm, * track the difference between those two cases we have * to fail the mapping when we run out of SMMU space. 
*/ - gk20a_warn(d, "oom allocating tracking buffer"); + nvgpu_warn(g, "oom allocating tracking buffer"); goto clean_up; } @@ -2111,7 +2103,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm, err = setup_buffer_kind_and_compression(vm, flags, &bfr, bfr.pgsz_idx); if (unlikely(err)) { - gk20a_err(d, "failure setting up kind and compression"); + nvgpu_err(g, "failure setting up kind and compression"); goto clean_up; } @@ -2204,7 +2196,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm, /* TBD: check for multiple mapping of same buffer */ mapped_buffer = nvgpu_kzalloc(g, sizeof(*mapped_buffer)); if (!mapped_buffer) { - gk20a_warn(d, "oom allocating tracking buffer"); + nvgpu_warn(g, "oom allocating tracking buffer"); goto clean_up; } mapped_buffer->dmabuf = dmabuf; @@ -2230,7 +2222,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm, err = insert_mapped_buffer(vm, mapped_buffer); if (err) { - gk20a_err(d, "failed to insert into mapped buffer tree"); + nvgpu_err(g, "failed to insert into mapped buffer tree"); goto clean_up; } inserted = true; @@ -2274,7 +2266,7 @@ int gk20a_vm_get_compbits_info(struct vm_gk20a *vm, u32 *flags) { struct mapped_buffer_node *mapped_buffer; - struct device *d = dev_from_vm(vm); + struct gk20a *g = vm->mm->g; nvgpu_mutex_acquire(&vm->update_gmmu_lock); @@ -2283,7 +2275,7 @@ int gk20a_vm_get_compbits_info(struct vm_gk20a *vm, if (!mapped_buffer || !mapped_buffer->user_mapped) { nvgpu_mutex_release(&vm->update_gmmu_lock); - gk20a_err(d, "%s: bad offset 0x%llx", __func__, mapping_gva); + nvgpu_err(g, "%s: bad offset 0x%llx", __func__, mapping_gva); return -EFAULT; } @@ -2316,19 +2308,18 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm, { struct mapped_buffer_node *mapped_buffer; struct gk20a *g = gk20a_from_vm(vm); - struct device *d = dev_from_vm(vm); const bool fixed_mapping = (flags & NVGPU_AS_MAP_BUFFER_COMPBITS_FLAGS_FIXED_OFFSET) != 0; if (vm->userspace_managed && !fixed_mapping) { - gk20a_err(d, + nvgpu_err(g, "%s: non-fixed-offset mapping is not available on userspace managed address spaces", __func__); return -EFAULT; } if (fixed_mapping && !vm->userspace_managed) { - gk20a_err(d, + nvgpu_err(g, "%s: fixed-offset mapping is available only on userspace managed address spaces", __func__); return -EFAULT; @@ -2341,13 +2332,13 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm, if (!mapped_buffer || !mapped_buffer->user_mapped) { nvgpu_mutex_release(&vm->update_gmmu_lock); - gk20a_err(d, "%s: bad offset 0x%llx", __func__, mapping_gva); + nvgpu_err(g, "%s: bad offset 0x%llx", __func__, mapping_gva); return -EFAULT; } if (!mapped_buffer->ctags_mappable) { nvgpu_mutex_release(&vm->update_gmmu_lock); - gk20a_err(d, "%s: comptags not mappable, offset 0x%llx", + nvgpu_err(g, "%s: comptags not mappable, offset 0x%llx", __func__, mapping_gva); return -EFAULT; } @@ -2366,7 +2357,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm, if (!mapped_buffer->ctag_map_win_size) { nvgpu_mutex_release(&vm->update_gmmu_lock); - gk20a_err(d, + nvgpu_err(g, "%s: mapping 0x%llx does not have " "mappable comptags", __func__, mapping_gva); @@ -2402,7 +2393,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm, * before before the buffer is * unmapped */ nvgpu_mutex_release(&vm->update_gmmu_lock); - gk20a_err(d, + nvgpu_err(g, "%s: comptags cannot be mapped into allocated space", __func__); return -EINVAL; @@ -2429,7 +2420,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm, if (!mapped_buffer->ctag_map_win_addr) { nvgpu_mutex_release(&vm->update_gmmu_lock); - gk20a_err(d, + nvgpu_err(g, "%s: failed to map comptags for 
mapping 0x%llx", __func__, mapping_gva); return -ENOMEM; @@ -2437,7 +2428,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm, } else if (fixed_mapping && *compbits_win_gva && mapped_buffer->ctag_map_win_addr != *compbits_win_gva) { nvgpu_mutex_release(&vm->update_gmmu_lock); - gk20a_err(d, + nvgpu_err(g, "%s: re-requesting comptags map into mismatching address. buffer offset 0x" "%llx, existing comptag map at 0x%llx, requested remap 0x%llx", __func__, mapping_gva, @@ -2486,7 +2477,7 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm, aperture); nvgpu_mutex_release(&vm->update_gmmu_lock); if (!vaddr) { - gk20a_err(dev_from_vm(vm), "failed to allocate va space"); + nvgpu_err(g, "failed to allocate va space"); return 0; } @@ -2553,7 +2544,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem) &gk20a_fence_out); if (err) { - gk20a_err(g->dev, + nvgpu_err(g, "Failed gk20a_ce_execute_ops[%d]", err); return err; } @@ -2576,7 +2567,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem) gk20a_fence_put(gk20a_last_fence); if (err) - gk20a_err(g->dev, + nvgpu_err(g, "fence wait failed for CE execute ops"); } @@ -2692,7 +2683,7 @@ int gk20a_get_sgtable(struct device *d, struct sg_table **sgt, int err = 0; *sgt = nvgpu_kzalloc(g, sizeof(struct sg_table)); if (!(*sgt)) { - dev_err(d, "failed to allocate memory\n"); + nvgpu_err(g, "failed to allocate memory\n"); err = -ENOMEM; goto fail; } @@ -2700,7 +2691,7 @@ int gk20a_get_sgtable(struct device *d, struct sg_table **sgt, cpuva, iova, size); if (err) { - dev_err(d, "failed to create sg table\n"); + nvgpu_err(g, "failed to create sg table\n"); goto fail; } sg_dma_address((*sgt)->sgl) = iova; @@ -2723,14 +2714,14 @@ int gk20a_get_sgtable_from_pages(struct device *d, struct sg_table **sgt, *sgt = nvgpu_kzalloc(g, sizeof(struct sg_table)); if (!(*sgt)) { - dev_err(d, "failed to allocate memory\n"); + nvgpu_err(g, "failed to allocate memory\n"); err = -ENOMEM; goto fail; } err = sg_alloc_table_from_pages(*sgt, pages, DIV_ROUND_UP(size, PAGE_SIZE), 0, size, GFP_KERNEL); if (err) { - dev_err(d, "failed to allocate sg_table\n"); + nvgpu_err(g, "failed to allocate sg_table\n"); goto fail; } sg_dma_address((*sgt)->sgl) = iova; @@ -3049,7 +3040,7 @@ static int update_gmmu_level_locked(struct vm_gk20a *vm, /* get cpu access to the ptes */ err = map_gmmu_pages(g, next_pte); if (err) { - gk20a_err(dev_from_vm(vm), + nvgpu_err(g, "couldn't map ptes for update as=%d", vm_aspace_id(vm)); return err; @@ -3113,7 +3104,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm, err = map_gmmu_pages(g, &vm->pdb); if (err) { - gk20a_err(dev_from_vm(vm), + nvgpu_err(g, "couldn't map ptes for update as=%d", vm_aspace_id(vm)); return err; @@ -3284,14 +3275,14 @@ void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer, void gk20a_vm_unmap(struct vm_gk20a *vm, u64 offset) { - struct device *d = dev_from_vm(vm); + struct gk20a *g = vm->mm->g; struct mapped_buffer_node *mapped_buffer; nvgpu_mutex_acquire(&vm->update_gmmu_lock); mapped_buffer = find_mapped_buffer_locked(vm->mapped_buffers, offset); if (!mapped_buffer) { nvgpu_mutex_release(&vm->update_gmmu_lock); - gk20a_err(d, "invalid addr to unmap 0x%llx", offset); + nvgpu_err(g, "invalid addr to unmap 0x%llx", offset); return; } @@ -4195,14 +4186,13 @@ void gk20a_deinit_vm(struct vm_gk20a *vm) int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block) { - struct device *dev = dev_from_gk20a(g); int err; gk20a_dbg_fn(""); err = 
nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block); if (err) { - gk20a_err(dev, "%s: memory allocation failed\n", __func__); + nvgpu_err(g, "%s: memory allocation failed\n", __func__); return err; } @@ -4462,8 +4452,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g) } while (!nvgpu_timeout_expired(&timeout)); if (nvgpu_timeout_peek_expired(&timeout)) - gk20a_warn(dev_from_gk20a(g), - "l2_system_invalidate too many retries"); + nvgpu_warn(g, "l2_system_invalidate too many retries"); trace_gk20a_mm_l2_invalidate_done(g->name); } diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index 547ba924..38b8da9c 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "gk20a.h" #include "gr_gk20a.h" @@ -314,7 +315,7 @@ static void printtrace(struct pmu_gk20a *pmu) trace = (char *)tracebuffer; trace1 = (u32 *)tracebuffer; - gk20a_err(dev_from_gk20a(g), "Dump pmutrace"); + nvgpu_err(g, "Dump pmutrace"); for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) { for (j = 0; j < 0x40; j++) if (trace1[(i / 4) + j]) @@ -335,7 +336,7 @@ static void printtrace(struct pmu_gk20a *pmu) m += k + 2; } scnprintf((buf + count), 0x40, "%s", (trace+i+20+m)); - gk20a_err(dev_from_gk20a(g), "%s", buf); + nvgpu_err(g, "%s", buf); } nvgpu_kfree(g, tracebuffer); } @@ -2184,8 +2185,7 @@ int gk20a_init_pmu(struct pmu_gk20a *pmu) get_pmu_sequence_out_alloc_ptr_v0; break; default: - gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)), - "PMU code version not supported version: %d\n", + nvgpu_err(g, "PMU code version not supported version: %d\n", pmu->desc->app_version); err = -EINVAL; goto fail_pmu_seq; @@ -2217,14 +2217,12 @@ void pmu_copy_from_dmem(struct pmu_gk20a *pmu, u32 *dst_u32 = (u32*)dst; if (size == 0) { - gk20a_err(dev_from_gk20a(g), - "size is zero"); + nvgpu_err(g, "size is zero"); return; } if (src & 0x3) { - gk20a_err(dev_from_gk20a(g), - "src (0x%08x) not 4-byte aligned", src); + nvgpu_err(g, "src (0x%08x) not 4-byte aligned", src); return; } @@ -2263,14 +2261,12 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu, u32 *src_u32 = (u32*)src; if (size == 0) { - gk20a_err(dev_from_gk20a(g), - "size is zero"); + nvgpu_err(g, "size is zero"); return; } if (dst & 0x3) { - gk20a_err(dev_from_gk20a(g), - "dst (0x%08x) not 4-byte aligned", dst); + nvgpu_err(g, "dst (0x%08x) not 4-byte aligned", dst); return; } @@ -2300,8 +2296,7 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu, data = gk20a_readl(g, pwr_falcon_dmemc_r(port)) & addr_mask; size = ALIGN(size, 4); if (data != ((dst + size) & addr_mask)) { - gk20a_err(dev_from_gk20a(g), - "copy failed. bytes written %d, expected %d", + nvgpu_err(g, "copy failed. 
bytes written %d, expected %d", data - dst, size); } nvgpu_mutex_release(&pmu->pmu_copy_lock); @@ -2432,7 +2427,7 @@ int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable) } while (!nvgpu_timeout_expired(&timeout)); g->ops.mc.disable(g, mc_enable_pwr_enabled_f()); - gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout"); + nvgpu_err(g, "Falcon mem scrubbing timeout"); return -ETIMEDOUT; } else { @@ -2615,8 +2610,7 @@ static int pmu_seq_acquire(struct pmu_gk20a *pmu, index = find_first_zero_bit(pmu->pmu_seq_tbl, sizeof(pmu->pmu_seq_tbl)); if (index >= sizeof(pmu->pmu_seq_tbl)) { - gk20a_err(dev_from_gk20a(g), - "no free sequence available"); + nvgpu_err(g, "no free sequence available"); nvgpu_mutex_release(&pmu->pmu_seq_lock); return -EAGAIN; } @@ -2787,7 +2781,7 @@ int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token) gk20a_readl(g, pwr_pmu_mutex_id_r())); if (data == pwr_pmu_mutex_id_value_init_v() || data == pwr_pmu_mutex_id_value_not_avail_v()) { - gk20a_warn(dev_from_gk20a(g), + nvgpu_warn(g, "fail to generate mutex token: val 0x%08x", owner); usleep_range(20, 40); @@ -2844,8 +2838,7 @@ int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token) gk20a_readl(g, pwr_pmu_mutex_r(mutex->index))); if (*token != owner) { - gk20a_err(dev_from_gk20a(g), - "requester 0x%08x NOT match owner 0x%08x", + nvgpu_err(g, "requester 0x%08x NOT match owner 0x%08x", *token, owner); return -EINVAL; } @@ -2953,8 +2946,7 @@ static int pmu_queue_push(struct pmu_gk20a *pmu, gk20a_dbg_fn(""); if (!queue->opened && queue->oflag == OFLAG_WRITE){ - gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)), - "queue not opened for write"); + nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for write"); return -EINVAL; } @@ -2972,8 +2964,7 @@ static int pmu_queue_pop(struct pmu_gk20a *pmu, *bytes_read = 0; if (!queue->opened && queue->oflag == OFLAG_READ){ - gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)), - "queue not opened for read"); + nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for read"); return -EINVAL; } @@ -2989,7 +2980,7 @@ static int pmu_queue_pop(struct pmu_gk20a *pmu, used = queue->offset + queue->size - tail; if (size > used) { - gk20a_warn(dev_from_gk20a(gk20a_from_pmu(pmu)), + nvgpu_warn(gk20a_from_pmu(pmu), "queue size smaller than request read"); size = used; } @@ -3008,8 +2999,7 @@ static void pmu_queue_rewind(struct pmu_gk20a *pmu, gk20a_dbg_fn(""); if (!queue->opened) { - gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)), - "queue not opened"); + nvgpu_err(gk20a_from_pmu(pmu), "queue not opened"); return; } @@ -3132,7 +3122,6 @@ static int gk20a_prepare_ucode(struct gk20a *g) { struct pmu_gk20a *pmu = &g->pmu; int err = 0; - struct device *d = dev_from_gk20a(g); struct mm_gk20a *mm = &g->mm; struct vm_gk20a *vm = &mm->pmu.vm; @@ -3141,7 +3130,7 @@ static int gk20a_prepare_ucode(struct gk20a *g) pmu->fw = nvgpu_request_firmware(g, GK20A_PMU_UCODE_IMAGE, 0); if (!pmu->fw) { - gk20a_err(d, "failed to load pmu ucode!!"); + nvgpu_err(g, "failed to load pmu ucode!!"); return err; } @@ -3173,7 +3162,6 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g) struct pmu_gk20a *pmu = &g->pmu; struct mm_gk20a *mm = &g->mm; struct vm_gk20a *vm = &mm->pmu.vm; - struct device *d = dev_from_gk20a(g); unsigned int i; int err = 0; u8 *ptr; @@ -3228,7 +3216,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g) err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE, &pmu->seq_buf); if (err) { - gk20a_err(d, "failed to allocate memory\n"); + nvgpu_err(g, "failed to allocate memory\n"); goto err_free_seq; 
} @@ -3245,7 +3233,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g) err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE, &pmu->trace_buf); if (err) { - gk20a_err(d, "failed to allocate pmu trace buffer\n"); + nvgpu_err(g, "failed to allocate pmu trace buffer\n"); goto err_free_seq_buf; } @@ -3275,7 +3263,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, gk20a_dbg_pmu("reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS"); if (status != 0) { - gk20a_err(dev_from_gk20a(g), "PGENG cmd aborted"); + nvgpu_err(g, "PGENG cmd aborted"); /* TBD: disable ELPG */ return; } @@ -3283,7 +3271,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED); if ((!pmu->buf_loaded) && (pmu->pmu_state == PMU_STATE_LOADING_PG_BUF)) - gk20a_err(dev_from_gk20a(g), "failed to load PGENG buffer"); + nvgpu_err(g, "failed to load PGENG buffer"); else { schedule_work(&pmu->pg_init); } @@ -3571,7 +3559,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, gk20a_dbg_fn(""); if (status != 0) { - gk20a_err(dev_from_gk20a(g), "ELPG cmd aborted"); + nvgpu_err(g, "ELPG cmd aborted"); /* TBD: disable ELPG */ return; } @@ -3615,7 +3603,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, } break; default: - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "unsupported ELPG message : 0x%04x", elpg_msg->msg); } @@ -3630,7 +3618,7 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg, gk20a_dbg_fn(""); if (status != 0) { - gk20a_err(dev_from_gk20a(g), "ELPG cmd aborted"); + nvgpu_err(g, "ELPG cmd aborted"); /* TBD: disable ELPG */ return; } @@ -3769,7 +3757,7 @@ static u8 get_perfmon_id(struct pmu_gk20a *pmu) break; #endif default: - gk20a_err(g->dev, "no support for %x", ver); + nvgpu_err(g, "no support for %x", ver); BUG(); } @@ -3837,8 +3825,7 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu) pmu->sample_buffer = nvgpu_alloc(&pmu->dmem, 2 * sizeof(u16)); if (!pmu->sample_buffer) { - gk20a_err(dev_from_gk20a(g), - "failed to allocate perfmon sample buffer"); + nvgpu_err(g, "failed to allocate perfmon sample buffer"); return -ENOMEM; } @@ -3893,8 +3880,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu, pmu_copy_from_dmem(pmu, tail, (u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0); if (msg->hdr.unit_id != PMU_UNIT_INIT) { - gk20a_err(dev_from_gk20a(g), - "expecting init msg"); + nvgpu_err(g, "expecting init msg"); return -EINVAL; } @@ -3902,8 +3888,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu, (u8 *)&msg->msg, msg->hdr.size - PMU_MSG_HDR_SIZE, 0); if (msg->msg.init.msg_type != PMU_INIT_MSG_TYPE_PMU_INIT) { - gk20a_err(dev_from_gk20a(g), - "expecting init msg"); + nvgpu_err(g, "expecting init msg"); return -EINVAL; } @@ -3970,8 +3955,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue, err = pmu_queue_open_read(pmu, queue); if (err) { - gk20a_err(dev_from_gk20a(g), - "fail to open queue %d for read", queue->id); + nvgpu_err(g, "fail to open queue %d for read", queue->id); *status = err; return false; } @@ -3979,8 +3963,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue, err = pmu_queue_pop(pmu, queue, &msg->hdr, PMU_MSG_HDR_SIZE, &bytes_read); if (err || bytes_read != PMU_MSG_HDR_SIZE) { - gk20a_err(dev_from_gk20a(g), - "fail to read msg from queue %d", queue->id); + nvgpu_err(g, "fail to read msg from queue %d", queue->id); *status = err | -EINVAL; 
goto clean_up; } @@ -3991,7 +3974,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue, err = pmu_queue_pop(pmu, queue, &msg->hdr, PMU_MSG_HDR_SIZE, &bytes_read); if (err || bytes_read != PMU_MSG_HDR_SIZE) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to read msg from queue %d", queue->id); *status = err | -EINVAL; goto clean_up; @@ -3999,8 +3982,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue, } if (!PMU_UNIT_ID_IS_VALID(msg->hdr.unit_id)) { - gk20a_err(dev_from_gk20a(g), - "read invalid unit_id %d from queue %d", + nvgpu_err(g, "read invalid unit_id %d from queue %d", msg->hdr.unit_id, queue->id); *status = -EINVAL; goto clean_up; @@ -4011,7 +3993,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue, err = pmu_queue_pop(pmu, queue, &msg->msg, read_size, &bytes_read); if (err || bytes_read != read_size) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "fail to read msg from queue %d", queue->id); *status = err; goto clean_up; @@ -4020,8 +4002,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue, err = pmu_queue_close(pmu, queue, true); if (err) { - gk20a_err(dev_from_gk20a(g), - "fail to close queue %d", queue->id); + nvgpu_err(g, "fail to close queue %d", queue->id); *status = err; return false; } @@ -4031,8 +4012,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue, clean_up: err = pmu_queue_close(pmu, queue, false); if (err) - gk20a_err(dev_from_gk20a(g), - "fail to close queue %d", queue->id); + nvgpu_err(g, "fail to close queue %d", queue->id); return false; } @@ -4049,23 +4029,20 @@ static int pmu_response_handle(struct pmu_gk20a *pmu, seq = &pmu->seq[msg->hdr.seq_id]; if (seq->state != PMU_SEQ_STATE_USED && seq->state != PMU_SEQ_STATE_CANCELLED) { - gk20a_err(dev_from_gk20a(g), - "msg for an unknown sequence %d", seq->id); + nvgpu_err(g, "msg for an unknown sequence %d", seq->id); return -EINVAL; } if (msg->hdr.unit_id == PMU_UNIT_RC && msg->msg.rc.msg_type == PMU_RC_MSG_TYPE_UNHANDLED_CMD) { - gk20a_err(dev_from_gk20a(g), - "unhandled cmd: seq %d", seq->id); + nvgpu_err(g, "unhandled cmd: seq %d", seq->id); } else if (seq->state != PMU_SEQ_STATE_CANCELLED) { if (seq->msg) { if (seq->msg->hdr.size >= msg->hdr.size) { memcpy(seq->msg, msg, msg->hdr.size); } else { - gk20a_err(dev_from_gk20a(g), - "sequence %d msg buffer too small", + nvgpu_err(g, "sequence %d msg buffer too small", seq->id); } } @@ -4158,7 +4135,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g), &pmu->zbc_save_done, 1); if (!pmu->zbc_save_done) - gk20a_err(dev_from_gk20a(g), "ZBC save timeout"); + nvgpu_err(g, "ZBC save timeout"); } static int pmu_perfmon_start_sampling(struct pmu_gk20a *pmu) @@ -4451,118 +4428,118 @@ void pmu_dump_falcon_stats(struct pmu_gk20a *pmu) struct gk20a *g = gk20a_from_pmu(pmu); unsigned int i; - gk20a_err(dev_from_gk20a(g), "pwr_falcon_os_r : %d", + nvgpu_err(g, "pwr_falcon_os_r : %d", gk20a_readl(g, pwr_falcon_os_r())); - gk20a_err(dev_from_gk20a(g), "pwr_falcon_cpuctl_r : 0x%x", + nvgpu_err(g, "pwr_falcon_cpuctl_r : 0x%x", gk20a_readl(g, pwr_falcon_cpuctl_r())); - gk20a_err(dev_from_gk20a(g), "pwr_falcon_idlestate_r : 0x%x", + nvgpu_err(g, "pwr_falcon_idlestate_r : 0x%x", gk20a_readl(g, pwr_falcon_idlestate_r())); - gk20a_err(dev_from_gk20a(g), "pwr_falcon_mailbox0_r : 0x%x", + nvgpu_err(g, "pwr_falcon_mailbox0_r : 0x%x", gk20a_readl(g, pwr_falcon_mailbox0_r())); - 
gk20a_err(dev_from_gk20a(g), "pwr_falcon_mailbox1_r : 0x%x", + nvgpu_err(g, "pwr_falcon_mailbox1_r : 0x%x", gk20a_readl(g, pwr_falcon_mailbox1_r())); - gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqstat_r : 0x%x", + nvgpu_err(g, "pwr_falcon_irqstat_r : 0x%x", gk20a_readl(g, pwr_falcon_irqstat_r())); - gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqmode_r : 0x%x", + nvgpu_err(g, "pwr_falcon_irqmode_r : 0x%x", gk20a_readl(g, pwr_falcon_irqmode_r())); - gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqmask_r : 0x%x", + nvgpu_err(g, "pwr_falcon_irqmask_r : 0x%x", gk20a_readl(g, pwr_falcon_irqmask_r())); - gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqdest_r : 0x%x", + nvgpu_err(g, "pwr_falcon_irqdest_r : 0x%x", gk20a_readl(g, pwr_falcon_irqdest_r())); for (i = 0; i < pwr_pmu_mailbox__size_1_v(); i++) - gk20a_err(dev_from_gk20a(g), "pwr_pmu_mailbox_r(%d) : 0x%x", + nvgpu_err(g, "pwr_pmu_mailbox_r(%d) : 0x%x", i, gk20a_readl(g, pwr_pmu_mailbox_r(i))); for (i = 0; i < pwr_pmu_debug__size_1_v(); i++) - gk20a_err(dev_from_gk20a(g), "pwr_pmu_debug_r(%d) : 0x%x", + nvgpu_err(g, "pwr_pmu_debug_r(%d) : 0x%x", i, gk20a_readl(g, pwr_pmu_debug_r(i))); for (i = 0; i < 6/*NV_PPWR_FALCON_ICD_IDX_RSTAT__SIZE_1*/; i++) { gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), pwr_pmu_falcon_icd_cmd_opc_rstat_f() | pwr_pmu_falcon_icd_cmd_idx_f(i)); - gk20a_err(dev_from_gk20a(g), "pmu_rstat (%d) : 0x%x", + nvgpu_err(g, "pmu_rstat (%d) : 0x%x", i, gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); } i = gk20a_readl(g, pwr_pmu_bar0_error_status_r()); - gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_error_status_r : 0x%x", i); + nvgpu_err(g, "pwr_pmu_bar0_error_status_r : 0x%x", i); if (i != 0) { - gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_addr_r : 0x%x", + nvgpu_err(g, "pwr_pmu_bar0_addr_r : 0x%x", gk20a_readl(g, pwr_pmu_bar0_addr_r())); - gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_data_r : 0x%x", + nvgpu_err(g, "pwr_pmu_bar0_data_r : 0x%x", gk20a_readl(g, pwr_pmu_bar0_data_r())); - gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_timeout_r : 0x%x", + nvgpu_err(g, "pwr_pmu_bar0_timeout_r : 0x%x", gk20a_readl(g, pwr_pmu_bar0_timeout_r())); - gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_ctl_r : 0x%x", + nvgpu_err(g, "pwr_pmu_bar0_ctl_r : 0x%x", gk20a_readl(g, pwr_pmu_bar0_ctl_r())); } i = gk20a_readl(g, pwr_pmu_bar0_fecs_error_r()); - gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_fecs_error_r : 0x%x", i); + nvgpu_err(g, "pwr_pmu_bar0_fecs_error_r : 0x%x", i); i = gk20a_readl(g, pwr_falcon_exterrstat_r()); - gk20a_err(dev_from_gk20a(g), "pwr_falcon_exterrstat_r : 0x%x", i); + nvgpu_err(g, "pwr_falcon_exterrstat_r : 0x%x", i); if (pwr_falcon_exterrstat_valid_v(i) == pwr_falcon_exterrstat_valid_true_v()) { - gk20a_err(dev_from_gk20a(g), "pwr_falcon_exterraddr_r : 0x%x", + nvgpu_err(g, "pwr_falcon_exterraddr_r : 0x%x", gk20a_readl(g, pwr_falcon_exterraddr_r())); - gk20a_err(dev_from_gk20a(g), "pmc_enable : 0x%x", + nvgpu_err(g, "pmc_enable : 0x%x", gk20a_readl(g, mc_enable_r())); } - gk20a_err(dev_from_gk20a(g), "pwr_falcon_engctl_r : 0x%x", + nvgpu_err(g, "pwr_falcon_engctl_r : 0x%x", gk20a_readl(g, pwr_falcon_engctl_r())); - gk20a_err(dev_from_gk20a(g), "pwr_falcon_curctx_r : 0x%x", + nvgpu_err(g, "pwr_falcon_curctx_r : 0x%x", gk20a_readl(g, pwr_falcon_curctx_r())); - gk20a_err(dev_from_gk20a(g), "pwr_falcon_nxtctx_r : 0x%x", + nvgpu_err(g, "pwr_falcon_nxtctx_r : 0x%x", gk20a_readl(g, pwr_falcon_nxtctx_r())); gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), pwr_pmu_falcon_icd_cmd_opc_rreg_f() | pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_IMB)); - 
gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_IMB : 0x%x", + nvgpu_err(g, "PMU_FALCON_REG_IMB : 0x%x", gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), pwr_pmu_falcon_icd_cmd_opc_rreg_f() | pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_DMB)); - gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_DMB : 0x%x", + nvgpu_err(g, "PMU_FALCON_REG_DMB : 0x%x", gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), pwr_pmu_falcon_icd_cmd_opc_rreg_f() | pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CSW)); - gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_CSW : 0x%x", + nvgpu_err(g, "PMU_FALCON_REG_CSW : 0x%x", gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), pwr_pmu_falcon_icd_cmd_opc_rreg_f() | pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CTX)); - gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_CTX : 0x%x", + nvgpu_err(g, "PMU_FALCON_REG_CTX : 0x%x", gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), pwr_pmu_falcon_icd_cmd_opc_rreg_f() | pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_EXCI)); - gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_EXCI : 0x%x", + nvgpu_err(g, "PMU_FALCON_REG_EXCI : 0x%x", gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); for (i = 0; i < 4; i++) { gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), pwr_pmu_falcon_icd_cmd_opc_rreg_f() | pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_PC)); - gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_PC : 0x%x", + nvgpu_err(g, "PMU_FALCON_REG_PC : 0x%x", gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(), pwr_pmu_falcon_icd_cmd_opc_rreg_f() | pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_SP)); - gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_SP : 0x%x", + nvgpu_err(g, "PMU_FALCON_REG_SP : 0x%x", gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r())); } - gk20a_err(dev_from_gk20a(g), "elpg stat: %d\n", + nvgpu_err(g, "elpg stat: %d\n", pmu->elpg_stat); /* PMU may crash due to FECS crash. Dump FECS status */ @@ -4600,8 +4577,7 @@ void gk20a_pmu_isr(struct gk20a *g) } if (intr & pwr_falcon_irqstat_halt_true_f()) { - gk20a_err(dev_from_gk20a(g), - "pmu halt intr not implemented"); + nvgpu_err(g, "pmu halt intr not implemented"); pmu_dump_falcon_stats(pmu); if (gk20a_readl(g, pwr_pmu_mailbox_r (PMU_MODE_MISMATCH_STATUS_MAILBOX_R)) == @@ -4610,7 +4586,7 @@ void gk20a_pmu_isr(struct gk20a *g) g->ops.pmu.dump_secure_fuses(g); } if (intr & pwr_falcon_irqstat_exterr_true_f()) { - gk20a_err(dev_from_gk20a(g), + nvgpu_err(g, "pmu exterr intr not implemented. 
Clearing interrupt."); pmu_dump_falcon_stats(pmu); @@ -4692,7 +4668,7 @@ static bool pmu_validate_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd, return true; invalid_cmd: - gk20a_err(dev_from_gk20a(g), "invalid pmu cmd :\n" + nvgpu_err(g, "invalid pmu cmd :\n" "queue_id=%d,\n" "cmd_size=%d, cmd_unit_id=%d, msg=%p, msg_size=%d,\n" "payload in=%p, in_size=%d, in_offset=%d,\n" @@ -4736,8 +4712,7 @@ static int pmu_write_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd, clean_up: if (err) - gk20a_err(dev_from_gk20a(g), - "fail to write cmd to queue %d", queue_id); + nvgpu_err(g, "fail to write cmd to queue %d", queue_id); else gk20a_dbg_fn("done"); @@ -4762,7 +4737,7 @@ int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem, err = nvgpu_dma_alloc_map_vid(vm, size, mem); if (err) { - gk20a_err(g->dev, "memory allocation failed"); + nvgpu_err(g, "memory allocation failed"); return -ENOMEM; } @@ -4778,7 +4753,7 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem, err = nvgpu_dma_alloc_map_sys(vm, size, mem); if (err) { - gk20a_err(g->dev, "failed to allocate memory\n"); + nvgpu_err(g, "failed to allocate memory\n"); return -ENOMEM; } @@ -4806,14 +4781,11 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) { if (!cmd) - gk20a_warn(dev_from_gk20a(g), - "%s(): PMU cmd buffer is NULL", __func__); + nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__); else if (!seq_desc) - gk20a_warn(dev_from_gk20a(g), - "%s(): Seq descriptor is NULL", __func__); + nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__); else - gk20a_warn(dev_from_gk20a(g), - "%s(): PMU is not ready", __func__); + nvgpu_warn(g, "%s(): PMU is not ready", __func__); WARN_ON(1); return -EINVAL; @@ -5044,9 +5016,9 @@ int gk20a_pmu_enable_elpg(struct gk20a *g) /* something is not right if we end up in following code path */ if (unlikely(pmu->elpg_refcnt > 1)) { - gk20a_warn(dev_from_gk20a(g), - "%s(): possible elpg refcnt mismatch. elpg refcnt=%d", - __func__, pmu->elpg_refcnt); + nvgpu_warn(g, + "%s(): possible elpg refcnt mismatch. elpg refcnt=%d", + __func__, pmu->elpg_refcnt); WARN_ON(1); } @@ -5102,9 +5074,9 @@ int gk20a_pmu_disable_elpg(struct gk20a *g) pmu->elpg_refcnt--; if (pmu->elpg_refcnt > 0) { - gk20a_warn(dev_from_gk20a(g), - "%s(): possible elpg refcnt mismatch. elpg refcnt=%d", - __func__, pmu->elpg_refcnt); + nvgpu_warn(g, + "%s(): possible elpg refcnt mismatch. 
elpg refcnt=%d", + __func__, pmu->elpg_refcnt); WARN_ON(1); ret = 0; goto exit_unlock; @@ -5123,8 +5095,7 @@ int gk20a_pmu_disable_elpg(struct gk20a *g) &pmu->elpg_stat, PMU_ELPG_STAT_ON); if (pmu->elpg_stat != PMU_ELPG_STAT_ON) { - gk20a_err(dev_from_gk20a(g), - "ELPG_ALLOW_ACK failed, elpg_stat=%d", + nvgpu_err(g, "ELPG_ALLOW_ACK failed, elpg_stat=%d", pmu->elpg_stat); pmu_dump_elpg_stats(pmu); pmu_dump_falcon_stats(pmu); @@ -5175,8 +5146,7 @@ int gk20a_pmu_disable_elpg(struct gk20a *g) gk20a_get_gr_idle_timeout(g), ptr, PMU_ELPG_STAT_OFF); if (*ptr != PMU_ELPG_STAT_OFF) { - gk20a_err(dev_from_gk20a(g), - "ELPG_DISALLOW_ACK failed"); + nvgpu_err(g, "ELPG_DISALLOW_ACK failed"); pmu_dump_elpg_stats(pmu); pmu_dump_falcon_stats(pmu); ret = -EBUSY; diff --git a/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c b/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c index 752ee121..08198776 100644 --- a/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c @@ -20,6 +20,8 @@ #include "gk20a.h" +#include + #include #include #include @@ -121,6 +123,5 @@ void gk20a_priv_ring_isr(struct gk20a *g) } while (cmd != pri_ringmaster_command_cmd_no_cmd_v() && --retry); if (retry <= 0) - gk20a_warn(dev_from_gk20a(g), - "priv ringmaster cmd ack too many retries"); + nvgpu_warn(g, "priv ringmaster cmd ack too many retries"); } diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c index 9fa7514a..b19b16d7 100644 --- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c @@ -25,7 +25,7 @@ #include "dbg_gpu_gk20a.h" #include "regops_gk20a.h" - +#include static int regop_bsearch_range_cmp(const void *pkey, const void *pelem) { @@ -408,7 +408,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, ops, num_ops); if (!ok) { - dev_err(dbg_s->dev, "invalid op(s)"); + nvgpu_err(g, "invalid op(s)"); err = -EINVAL; /* each op has its own err/status */ goto clean_up; @@ -527,7 +527,6 @@ static int validate_reg_op_info(struct dbg_session_gk20a *dbg_s, break; default: op->status |= REGOP(STATUS_UNSUPPORTED_OP); - /*gk20a_err(dbg_s->dev, "Invalid regops op %d!", op->op);*/ err = -EINVAL; break; } @@ -546,7 +545,6 @@ static int validate_reg_op_info(struct dbg_session_gk20a *dbg_s, */ default: op->status |= REGOP(STATUS_INVALID_TYPE); - /*gk20a_err(dbg_s->dev, "Invalid regops type %d!", op->type);*/ err = -EINVAL; break; } @@ -593,7 +591,7 @@ static bool check_whitelists(struct dbg_session_gk20a *dbg_s, } else if (op->type == REGOP(TYPE_GR_CTX)) { /* it's a context-relative op */ if (!ch) { - gk20a_err(dbg_s->dev, "can't perform ctx regop unless bound"); + nvgpu_err(dbg_s->g, "can't perform ctx regop unless bound"); op->status = REGOP(STATUS_UNSUPPORTED_OP); return valid; } @@ -637,7 +635,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s, /* support only 24-bit 4-byte aligned offsets */ if (offset & 0xFF000003) { - gk20a_err(dbg_s->dev, "invalid regop offset: 0x%x\n", offset); + nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x\n", offset); op->status |= REGOP(STATUS_INVALID_OFFSET); return -EINVAL; } @@ -675,7 +673,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s, } if (!valid) { - gk20a_err(dbg_s->dev, "invalid regop offset: 0x%x\n", offset); + nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x\n", offset); op->status |= REGOP(STATUS_INVALID_OFFSET); return -EINVAL; } diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c index a58de920..1d7fd313 100644 --- 
a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c @@ -26,6 +26,7 @@ #include #include +#include #include "ctxsw_trace_gk20a.h" #include "gk20a.h" @@ -330,8 +331,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched, nvgpu_mutex_acquire(&sched->status_lock); if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) { - gk20a_warn(dev_from_gk20a(g), - "tsgid=%d already referenced", tsgid); + nvgpu_warn(g, "tsgid=%d already referenced", tsgid); /* unlock status_lock as gk20a_tsg_release locks it */ nvgpu_mutex_release(&sched->status_lock); kref_put(&tsg->refcount, gk20a_tsg_release); @@ -363,8 +363,7 @@ static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched, nvgpu_mutex_acquire(&sched->status_lock); if (!NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) { nvgpu_mutex_release(&sched->status_lock); - gk20a_warn(dev_from_gk20a(g), - "tsgid=%d not previously referenced", tsgid); + nvgpu_warn(g, "tsgid=%d not previously referenced", tsgid); return -ENXIO; } NVGPU_SCHED_CLR(tsgid, sched->ref_tsg_bitmap); diff --git a/drivers/gpu/nvgpu/gk20a/sim_gk20a.c b/drivers/gpu/nvgpu/gk20a/sim_gk20a.c index 76d29ee5..8951d5a4 100644 --- a/drivers/gpu/nvgpu/gk20a/sim_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/sim_gk20a.c @@ -20,6 +20,8 @@ #include "gk20a.h" +#include + #include static inline void sim_writel(struct gk20a *g, u32 r, u32 v) @@ -65,7 +67,7 @@ static void gk20a_remove_sim_support(struct sim_gk20a *s) gk20a_free_sim_support(g); } -static int alloc_and_kmap_iopage(struct device *d, +static int alloc_and_kmap_iopage(struct gk20a *g, void **kvaddr, u64 *phys, struct page **page) @@ -75,14 +77,14 @@ static int alloc_and_kmap_iopage(struct device *d, if (!*page) { err = -ENOMEM; - dev_err(d, "couldn't allocate io page\n"); + nvgpu_err(g, "couldn't allocate io page\n"); goto fail; } *kvaddr = kmap(*page); if (!*kvaddr) { err = -ENOMEM; - dev_err(d, "couldn't kmap io page\n"); + nvgpu_err(g, "couldn't kmap io page\n"); goto fail; } *phys = page_to_phys(*page); @@ -105,27 +107,27 @@ int gk20a_init_sim_support(struct platform_device *pdev) g->sim.regs = gk20a_ioremap_resource(pdev, GK20A_SIM_IORESOURCE_MEM, &g->sim.reg_mem); if (IS_ERR(g->sim.regs)) { - dev_err(dev, "failed to remap gk20a sim regs\n"); + nvgpu_err(g, "failed to remap gk20a sim regs\n"); err = PTR_ERR(g->sim.regs); goto fail; } /* allocate sim event/msg buffers */ - err = alloc_and_kmap_iopage(dev, &g->sim.send_bfr.kvaddr, + err = alloc_and_kmap_iopage(g, &g->sim.send_bfr.kvaddr, &g->sim.send_bfr.phys, &g->sim.send_bfr.page); - err = err || alloc_and_kmap_iopage(dev, &g->sim.recv_bfr.kvaddr, + err = err || alloc_and_kmap_iopage(g, &g->sim.recv_bfr.kvaddr, &g->sim.recv_bfr.phys, &g->sim.recv_bfr.page); - err = err || alloc_and_kmap_iopage(dev, &g->sim.msg_bfr.kvaddr, + err = err || alloc_and_kmap_iopage(g, &g->sim.msg_bfr.kvaddr, &g->sim.msg_bfr.phys, &g->sim.msg_bfr.page); if (!(g->sim.send_bfr.kvaddr && g->sim.recv_bfr.kvaddr && g->sim.msg_bfr.kvaddr)) { - dev_err(dev, "couldn't allocate all sim buffers\n"); + nvgpu_err(g, "couldn't allocate all sim buffers\n"); goto fail; } @@ -275,7 +277,7 @@ static int rpc_recv_poll(struct gk20a *g) (u64)recv_phys_addr_lo << PAGE_SHIFT; if (recv_phys_addr != g->sim.msg_bfr.phys) { - dev_err(dev_from_gk20a(g), "%s Error in RPC reply\n", + nvgpu_err(g, "%s Error in RPC reply\n", __func__); return -1; } @@ -302,21 +304,21 @@ static int issue_rpc_and_wait(struct gk20a *g) err = rpc_send_message(g); if (err) { - dev_err(dev_from_gk20a(g), "%s failed 
rpc_send_message\n", + nvgpu_err(g, "%s failed rpc_send_message\n", __func__); return err; } err = rpc_recv_poll(g); if (err) { - dev_err(dev_from_gk20a(g), "%s failed rpc_recv_poll\n", + nvgpu_err(g, "%s failed rpc_recv_poll\n", __func__); return err; } /* Now check if RPC really succeeded */ if (*sim_msg_hdr(g, sim_msg_result_r()) != sim_msg_result_success_v()) { - dev_err(dev_from_gk20a(g), "%s received failed status!\n", + nvgpu_err(g, "%s received failed status!\n", __func__); return -(*sim_msg_hdr(g, sim_msg_result_r())); } diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c index 6281e4ad..5f07ade7 100644 --- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c @@ -15,6 +15,7 @@ */ #include +#include #include "gk20a.h" #include "tsg_gk20a.h" @@ -93,7 +94,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, if (tsg->runlist_id == FIFO_INVAL_TSG_ID) tsg->runlist_id = ch->runlist_id; else if (tsg->runlist_id != ch->runlist_id) { - gk20a_err(dev_from_gk20a(tsg->g), + nvgpu_err(tsg->g, "Error: TSG channel should be share same runlist ch[%d] tsg[%d]\n", ch->runlist_id, tsg->runlist_id); return -EINVAL; @@ -260,8 +261,7 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g) if (g->ops.fifo.tsg_open) { err = g->ops.fifo.tsg_open(tsg); if (err) { - gk20a_err(dev_from_gk20a(g), - "tsg %d fifo open failed %d", + nvgpu_err(g, "tsg %d fifo open failed %d", tsg->tsgid, err); goto clean_up; } -- cgit v1.2.2
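
For readers skimming the hunks above, the mechanical pattern is the same everywhere: call sites that previously fetched a struct device via dev_from_gk20a(g) or dev_from_vm(vm) now hand the struct gk20a pointer straight to nvgpu_err()/nvgpu_warn(). This patch only adds the <nvgpu/log.h> include and does not show the macro definitions themselves, so the snippet below is a hypothetical sketch of how such wrappers could be layered over the Linux dev_err()/dev_warn() helpers on a Linux build; it is an illustration of the idea, not the actual nvgpu implementation.

/* Hypothetical sketch only -- assumes a Linux build where dev_from_gk20a()
 * remains available inside the logging layer; the real macros live in
 * <nvgpu/log.h>, which is not part of this patch.
 */
#define nvgpu_err(g, fmt, args...) \
	dev_err(dev_from_gk20a(g), fmt, ##args)

#define nvgpu_warn(g, fmt, args...) \
	dev_warn(dev_from_gk20a(g), fmt, ##args)

/* Call-site conversion pattern used throughout the diff, e.g. in
 * gr_gk20a_suspend_contexts():
 *
 *   before:  gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
 *   after:   nvgpu_err(g, "unable to stop gr ctxsw");
 *
 * Only the OS-agnostic struct gk20a pointer crosses the call boundary,
 * which is what makes the new macros portable across operating systems.
 */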