From 5405070ecd27ce462babc1dff231fec5cd8bd6b7 Mon Sep 17 00:00:00 2001
From: Terje Bergstrom
Date: Thu, 6 Apr 2017 13:10:30 -0700
Subject: gpu: nvgpu: vgpu: Use new error macros

gk20a_err() and gk20a_warn() require a struct device pointer, which is
not portable across operating systems. The new nvgpu_err() and
nvgpu_warn() macros take a struct gk20a pointer. Convert code to use
the more portable macros.

JIRA NVGPU-16

Change-Id: I071e8c50959bfa81730ca964d912bc69f9c7e6ad
Signed-off-by: Terje Bergstrom
Reviewed-on: http://git-master/r/1457355
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/vgpu/css_vgpu.c            |  4 +--
 drivers/gpu/nvgpu/vgpu/fifo_vgpu.c           | 15 +++++-----
 drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c |  7 ++---
 drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c |  7 ++---
 drivers/gpu/nvgpu/vgpu/gr_vgpu.c             | 41 +++++++++++-----------------
 drivers/gpu/nvgpu/vgpu/mm_vgpu.c             | 11 ++++----
 drivers/gpu/nvgpu/vgpu/tsg_vgpu.c            |  4 +--
 drivers/gpu/nvgpu/vgpu/vgpu.c                | 26 +++++++++---------
 8 files changed, 51 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/nvgpu/vgpu/css_vgpu.c b/drivers/gpu/nvgpu/vgpu/css_vgpu.c
index 5a80f24d..142d9ce1 100644
--- a/drivers/gpu/nvgpu/vgpu/css_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/css_vgpu.c
@@ -157,7 +157,7 @@ static int vgpu_css_attach(struct channel_gk20a *ch,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
 	if (err)
-		gk20a_err(dev_from_gk20a(g), "%s failed", __func__);
+		nvgpu_err(g, "failed");
 	else
 		cs_client->perfmon_start = p->perfmon_start;
 
@@ -185,7 +185,7 @@ static int vgpu_css_detach(struct channel_gk20a *ch,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
 	if (err)
-		gk20a_err(dev_from_gk20a(g), "%s failed", __func__);
+		nvgpu_err(g, "failed");
 
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index e2883f7c..e775abbb 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -78,7 +78,7 @@ static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 	p->pid = (u64)current->tgid;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {
-		gk20a_err(dev_from_gk20a(g), "fail");
+		nvgpu_err(g, "fail");
 		return -ENOMEM;
 	}
 
@@ -365,21 +365,20 @@ static int vgpu_init_fifo_setup_hw(struct gk20a *g)
 		smp_mb();
 
 		if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
-			gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
+			nvgpu_err(g, "bar1 broken @ gk20a!");
 			return -EINVAL;
 		}
 
 		gk20a_bar1_writel(g, bar1_vaddr, v2);
 
 		if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
-			gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
+			nvgpu_err(g, "bar1 broken @ gk20a!");
 			return -EINVAL;
 		}
 
 		/* is it visible to the cpu? */
 		if (*cpu_vaddr != v2) {
-			gk20a_err(dev_from_gk20a(g),
-				"cpu didn't see bar1 write @ %p!",
+			nvgpu_err(g, "cpu didn't see bar1 write @ %p!",
 				cpu_vaddr);
 		}
 
@@ -426,7 +425,7 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
 	if (err || msg.ret) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"preempt channel %d failed\n", hw_chid);
 		err = -ENOMEM;
 	}
@@ -450,7 +449,7 @@ static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	err = err ? err : msg.ret;
 
 	if (err) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"preempt tsg %u failed\n", tsgid);
 	}
 
@@ -722,7 +721,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
 	if (!ch)
 		return 0;
 
-	gk20a_err(dev_from_gk20a(g), "fifo intr (%d) on ch %u",
+	nvgpu_err(g, "fifo intr (%d) on ch %u",
 		info->type, info->chid);
 
 	trace_gk20a_channel_reset(ch->hw_chid, ch->tsgid);
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
index 1a5811fe..cc9c46bf 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -88,7 +88,7 @@ static int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 		err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
 			graphics_preempt_mode, compute_preempt_mode);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"set_ctxsw_preemption_mode failed");
 			goto fail;
 		}
@@ -254,7 +254,7 @@ static int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 	return err;
 
 fail:
-	gk20a_err(dev_from_gk20a(g), "%s failed %d", __func__, err);
+	nvgpu_err(g, "%s failed %d", __func__, err);
 	return err;
 }
 
@@ -297,8 +297,7 @@ static int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 				graphics_preempt_mode, compute_preempt_mode);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"set_ctxsw_preemption_mode failed");
+			nvgpu_err(g, "set_ctxsw_preemption_mode failed");
 			return err;
 		}
 	} else {
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index 2da18fb8..cfda867c 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -54,7 +54,6 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 		enum nvgpu_aperture aperture)
 {
 	int err = 0;
-	struct device *d = dev_from_vm(vm);
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_map_ex_params *p = &msg.params.as_map_ex;
@@ -82,7 +81,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 	if (!map_offset) {
 		map_offset = gk20a_vm_alloc_va(vm, size, pgsz_idx);
 		if (!map_offset) {
-			gk20a_err(d, "failed to allocate va space");
+			nvgpu_err(g, "failed to allocate va space");
 			err = -ENOMEM;
 			goto fail;
 		}
@@ -140,7 +139,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 			vm->gmmu_page_sizes[gmmu_page_size_big]) {
 			pgsz_idx = gmmu_page_size_big;
 		} else {
-			gk20a_err(d, "invalid kernel page size %d\n",
+			nvgpu_err(g, "invalid kernel page size %d\n",
 				page_size);
 			goto fail;
 		}
@@ -171,7 +170,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 fail:
 	if (handle)
 		tegra_gr_comm_oob_put_ptr(handle);
-	gk20a_err(d, "%s: failed with err=%d\n", __func__, err);
+	nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
 	return 0;
 }
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 612e50e7..102adae3 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -294,7 +294,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 	err = err ? err : msg.ret;
 
 	if (unlikely(err)) {
-		gk20a_err(dev_from_gk20a(g), "fail to alloc gr_ctx");
+		nvgpu_err(g, "fail to alloc gr_ctx");
 		gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va,
 				 gr_ctx->mem.size, gmmu_page_size_kernel);
 		nvgpu_kfree(g, gr_ctx);
@@ -485,15 +485,13 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 
 	/* an address space needs to have been bound at this point.*/
 	if (!gk20a_channel_as_bound(c)) {
-		gk20a_err(dev_from_gk20a(g),
-			"not bound to address space at time"
+		nvgpu_err(g, "not bound to address space at time"
 			" of grctx allocation");
 		return -EINVAL;
 	}
 
 	if (!g->ops.gr.is_valid_class(g, args->class_num)) {
-		gk20a_err(dev_from_gk20a(g),
-			"invalid obj class 0x%x", args->class_num);
+		nvgpu_err(g, "invalid obj class 0x%x", args->class_num);
 		err = -EINVAL;
 		goto out;
 	}
@@ -512,15 +510,14 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 			if (!err)
 				err = vgpu_gr_ch_bind_gr_ctx(c);
 			if (err) {
-				gk20a_err(dev_from_gk20a(g),
-					"fail to allocate gr ctx buffer");
+				nvgpu_err(g, "fail to allocate gr ctx buffer");
 				goto out;
 			}
 		} else {
 			/*TBD: needs to be more subtle about which is
 			 * being allocated as some are allowed to be
 			 * allocated along same channel */
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"too many classes alloc'd on same channel");
 			err = -EINVAL;
 			goto out;
@@ -536,7 +533,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 			if (!err)
 				err = vgpu_gr_tsg_bind_gr_ctx(tsg);
 			if (err) {
-				gk20a_err(dev_from_gk20a(g),
+				nvgpu_err(g,
 					"fail to allocate TSG gr ctx buffer, err=%d", err);
 				gk20a_vm_put(tsg->vm);
 				tsg->vm = NULL;
@@ -547,8 +544,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 		ch_ctx->gr_ctx = tsg->tsg_gr_ctx;
 		err = vgpu_gr_ch_bind_gr_ctx(c);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"fail to bind gr ctx buffer");
+			nvgpu_err(g, "fail to bind gr ctx buffer");
 			goto out;
 		}
 	}
@@ -556,8 +552,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 	/* commit gr ctx buffer */
 	err = vgpu_gr_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g),
-			"fail to commit gr ctx buffer");
+		nvgpu_err(g, "fail to commit gr ctx buffer");
 		goto out;
 	}
 
@@ -565,8 +560,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 	if (ch_ctx->patch_ctx.mem.pages == NULL) {
 		err = vgpu_gr_alloc_channel_patch_ctx(g, c);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"fail to allocate patch buffer");
+			nvgpu_err(g, "fail to allocate patch buffer");
 			goto out;
 		}
 	}
@@ -575,8 +569,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 	if (!ch_ctx->global_ctx_buffer_mapped) {
 		err = vgpu_gr_map_global_ctx_buffers(g, c);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"fail to map global ctx buffer");
+			nvgpu_err(g, "fail to map global ctx buffer");
 			goto out;
 		}
 		gr_gk20a_elpg_protected_call(g,
@@ -588,8 +581,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 		err = gr_gk20a_elpg_protected_call(g,
 			vgpu_gr_load_golden_ctx_image(g, c));
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"fail to load golden ctx image");
+			nvgpu_err(g, "fail to load golden ctx image");
 			goto out;
 		}
 		c->first_init = true;
@@ -602,7 +594,7 @@ out:
 	   can be reused so no need to release them.
 	   2. golden image load is a one time thing so if
 	   they pass, no need to undo. */
-	gk20a_err(dev_from_gk20a(g), "fail");
+	nvgpu_err(g, "fail");
 	return err;
 }
 
@@ -651,7 +643,7 @@ static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 	g->ops.gr.init_fs_state(g);
 	return 0;
 cleanup:
-	gk20a_err(dev_from_gk20a(g), "%s: out of memory", __func__);
+	nvgpu_err(g, "out of memory");
 
 	nvgpu_kfree(g, gr->gpc_tpc_count);
 	gr->gpc_tpc_count = NULL;
@@ -905,7 +897,7 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 	return 0;
 
 clean_up:
-	gk20a_err(dev_from_gk20a(g), "fail");
+	nvgpu_err(g, "fail");
 	vgpu_remove_gr_support(gr);
 	return err;
 }
@@ -928,8 +920,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
 
 	if (info->type != TEGRA_VGPU_GR_INTR_NOTIFY &&
 	    info->type != TEGRA_VGPU_GR_INTR_SEMAPHORE)
-		gk20a_err(dev_from_gk20a(g), "gr intr (%d) on ch %u",
-			info->type, info->chid);
+		nvgpu_err(g, "gr intr (%d) on ch %u", info->type, info->chid);
 
 	switch (info->type) {
 	case TEGRA_VGPU_GR_INTR_NOTIFY:
@@ -1186,7 +1177,7 @@ void vgpu_gr_handle_sm_esr_event(struct gk20a *g,
 	struct nvgpu_dbg_gpu_sm_error_state_record *sm_error_states;
 
 	if (info->sm_id >= g->gr.no_of_sm) {
-		gk20a_err(g->dev, "invalid smd_id %d / %d",
+		nvgpu_err(g, "invalid smd_id %d / %d",
 			info->sm_id, g->gr.no_of_sm);
 		return;
 	}
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index b12f8a53..3c139df5 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -105,7 +105,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 		map_offset = gk20a_vm_alloc_va(vm, size,
 					pgsz_idx);
 		if (!map_offset) {
-			gk20a_err(d, "failed to allocate va space\n");
+			nvgpu_err(g, "failed to allocate va space\n");
 			err = -ENOMEM;
 			goto fail;
 		}
@@ -133,7 +133,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 			vm->gmmu_page_sizes[gmmu_page_size_big]) {
 			pgsz_idx = gmmu_page_size_big;
 		} else {
-			gk20a_err(d, "invalid kernel page size %d\n",
+			nvgpu_err(g, "invalid kernel page size %d\n",
 				page_size);
 			goto fail;
 		}
@@ -155,7 +155,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 	return map_offset;
 fail:
-	gk20a_err(d, "%s: failed with err=%d\n", __func__, err);
+	nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
 	return 0;
 }
 
@@ -294,7 +294,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	gk20a_dbg_fn("");
 
 	if (userspace_managed) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"userspace-managed address spaces not yet supported");
 		return -ENOSYS;
 	}
@@ -506,8 +506,7 @@ static void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 {
 	gk20a_dbg_fn("");
 
-	gk20a_err(g->dev, "%s: call to RM server not supported",
-		__func__);
+	nvgpu_err(g, "call to RM server not supported");
 }
 
 static void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
diff --git a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
index e668d1ed..8a0276f7 100644
--- a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
@@ -38,7 +38,7 @@ static int vgpu_tsg_open(struct tsg_gk20a *tsg)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
 	if (err) {
-		gk20a_err(dev_from_gk20a(tsg->g),
+		nvgpu_err(tsg->g,
 			"vgpu_tsg_open failed, tsgid %d", tsg->tsgid);
 	}
 
@@ -66,7 +66,7 @@ static int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
 	if (err) {
-		gk20a_err(dev_from_gk20a(tsg->g),
+		nvgpu_err(tsg->g,
 			"vgpu_tsg_bind_channel failed, ch %d tsgid %d",
 			ch->hw_chid, tsg->tsgid);
 		gk20a_tsg_unbind_channel(ch);
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
index b32df08d..4cb7c52e 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.c
@@ -106,7 +106,7 @@ static void vgpu_handle_channel_event(struct gk20a *g,
 {
 	if (info->id >= g->fifo.num_channels ||
 		info->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) {
-		gk20a_err(g->dev, "invalid channel event");
+		nvgpu_err(g, "invalid channel event");
 		return;
 	}
 
@@ -118,7 +118,7 @@ static void vgpu_handle_channel_event(struct gk20a *g,
 		struct channel_gk20a *ch = &g->fifo.channel[info->id];
 
 		if (!gk20a_channel_get(ch)) {
-			gk20a_err(g->dev, "invalid channel %d for event %d",
+			nvgpu_err(g, "invalid channel %d for event %d",
 				(int)info->id, (int)info->event_id);
 			return;
 		}
@@ -179,7 +179,7 @@ static int vgpu_intr_thread(void *dev_id)
 			vgpu_gr_handle_sm_esr_event(g, &msg->info.sm_esr);
 			break;
 		default:
-			gk20a_err(g->dev, "unknown event %u", msg->event);
+			nvgpu_err(g, "unknown event %u", msg->event);
 			break;
 		}
 
@@ -349,8 +349,7 @@ static int vgpu_read_ptimer(struct gk20a *g, u64 *value)
 	if (!err)
 		*value = p->time;
 	else
-		gk20a_err(dev_from_gk20a(g),
-			"vgpu read ptimer failed, err=%d", err);
+		nvgpu_err(g, "vgpu read ptimer failed, err=%d", err);
 
 	return err;
 }
@@ -393,7 +392,7 @@ static int vgpu_init_hal(struct gk20a *g)
 		err = vgpu_gp10b_init_hal(g);
 		break;
 	default:
-		gk20a_err(g->dev, "no support for %x", ver);
+		nvgpu_err(g, "no support for %x", ver);
 		err = -ENODEV;
 		break;
 	}
@@ -423,25 +422,25 @@ int vgpu_pm_finalize_poweron(struct device *dev)
 
 	err = vgpu_init_mm_support(g);
 	if (err) {
-		gk20a_err(dev, "failed to init gk20a mm");
+		nvgpu_err(g, "failed to init gk20a mm");
 		goto done;
 	}
 
 	err = vgpu_init_fifo_support(g);
 	if (err) {
-		gk20a_err(dev, "failed to init gk20a fifo");
+		nvgpu_err(g, "failed to init gk20a fifo");
 		goto done;
 	}
 
 	err = vgpu_init_gr_support(g);
 	if (err) {
-		gk20a_err(dev, "failed to init gk20a gr");
+		nvgpu_err(g, "failed to init gk20a gr");
 		goto done;
 	}
 
 	err = g->ops.chip_init_gpu_characteristics(g);
 	if (err) {
-		gk20a_err(dev, "failed to init gk20a gpu characteristics");
+		nvgpu_err(g, "failed to init gk20a gpu characteristics");
 		goto done;
 	}
 
@@ -459,6 +458,7 @@ static int vgpu_qos_notify(struct notifier_block *nb,
 	struct gk20a_scale_profile *profile =
 			container_of(nb, struct gk20a_scale_profile,
 					qos_notify_block);
+	struct gk20a *g = get_gk20a(profile->dev);
 	struct tegra_vgpu_cmd_msg msg = {};
 	struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
 	u32 max_freq;
@@ -474,7 +474,7 @@ static int vgpu_qos_notify(struct notifier_block *nb,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
 	if (err)
-		gk20a_err(profile->dev, "%s failed, err=%d", __func__, err);
+		nvgpu_err(g, "%s failed, err=%d", __func__, err);
 
 	return NOTIFY_OK; /* need notify call further */
 }
@@ -536,13 +536,13 @@ static int vgpu_get_constants(struct gk20a *g)
 	err = err ? err : msg.ret;
 
 	if (unlikely(err)) {
-		gk20a_err(g->dev, "%s failed, err=%d", __func__, err);
+		nvgpu_err(g, "%s failed, err=%d", __func__, err);
 		return err;
 	}
 
 	if (unlikely(p->gpc_count > TEGRA_VGPU_MAX_GPC_COUNT ||
 		p->max_tpc_per_gpc_count > TEGRA_VGPU_MAX_TPC_COUNT_PER_GPC)) {
-		gk20a_err(g->dev, "gpc_count %d max_tpc_per_gpc %d overflow",
+		nvgpu_err(g, "gpc_count %d max_tpc_per_gpc %d overflow",
 			(int)p->gpc_count, (int)p->max_tpc_per_gpc_count);
 		return -EINVAL;
 	}
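
Editor's note (not part of the commit): the conversion works because the new
logging macros are keyed off the driver-private struct gk20a rather than
Linux's struct device, and because nvgpu_err() can record __func__ itself,
call sites no longer need to pass "%s ...", __func__ by hand -- which is why
several hunks above shorten messages like '"%s failed", __func__' to just
"failed". The following self-contained C sketch models that pattern. It is
illustrative only: the struct layout, the "name" field, and the
nvgpu_log_msg() helper are hypothetical stand-ins, not the actual nvgpu
implementation.

/*
 * Sketch of a portable, struct-gk20a-based error macro.
 * Assumption-laden example; compiles with gcc/clang (uses the
 * kernel-style ##__VA_ARGS__ GNU extension).
 */
#include <stdarg.h>
#include <stdio.h>

struct gk20a {
	const char *name;	/* hypothetical device identifier */
};

/* Portable helper: depends only on struct gk20a, not struct device. */
static void nvgpu_log_msg(struct gk20a *g, const char *func,
			  const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	fprintf(stderr, "%s: %s: ", g->name, func);
	vfprintf(stderr, fmt, args);
	fputc('\n', stderr);
	va_end(args);
}

/* The macro captures __func__ itself, so call sites can drop it. */
#define nvgpu_err(g, fmt, ...) \
	nvgpu_log_msg(g, __func__, fmt, ##__VA_ARGS__)

int main(void)
{
	struct gk20a ga = { .name = "gk20a" };

	/* A converted call site, in the style of the patch: */
	nvgpu_err(&ga, "preempt tsg %u failed", 3u);
	return 0;
}

With a definition along these lines, a converted call such as
nvgpu_err(g, "preempt tsg %u failed", tsgid) needs only a struct gk20a,
which any OS port of the driver can provide -- the portability motivation
stated in the commit message.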