From e1438818b90c5b0d73aae800b12bd6b36aec5142 Mon Sep 17 00:00:00 2001
From: Richard Zhao
Date: Thu, 21 Jul 2016 16:51:40 -0700
Subject: gpu: nvgpu: vgpu: add vgpu private data and helper functions

Move the vgpu private data to a dedicated structure and allocate it
at probe time. Also add a virt_handle helper function, which is used
everywhere.

JIRA VFND-2103

Change-Id: I125911420be72ca9be948125d8357fa85d1d3afd
Signed-off-by: Richard Zhao
Reviewed-on: http://git-master/r/1185206
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vladislav Buzov
---
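For reference, below is a minimal, standalone sketch of the accessor
pattern this patch introduces. The struct members mirror the patch, but
the device plumbing is stubbed with a plain pointer (there is no g->dev
or gk20a_get_platform() here), so treat it as an illustration of the
shape of the change rather than the in-tree code:

/* sketch.c -- illustrative stub types, not the in-tree definitions */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

struct task_struct;	/* opaque here; the real one is in <linux/sched.h> */

/* vgpu-private state lives in its own struct, allocated once at probe */
struct vgpu_priv_data {
	u64 virt_handle;
	struct task_struct *intr_handler;
};

/* the platform struct keeps only an opaque pointer to it */
struct gk20a_platform {
	void *vgpu_priv;
};

/* stand-in for struct gk20a: the real code reaches the platform struct
 * through g->dev and gk20a_get_platform(), stubbed as a direct pointer */
struct gk20a {
	struct gk20a_platform *platform;
};

static inline struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g)
{
	return (struct vgpu_priv_data *)g->platform->vgpu_priv;
}

static inline u64 vgpu_get_handle(struct gk20a *g)
{
	return vgpu_get_priv_data(g)->virt_handle;
}

int main(void)
{
	struct vgpu_priv_data priv = { .virt_handle = 0x1234 };
	struct gk20a_platform plat = { .vgpu_priv = &priv };
	struct gk20a g = { .platform = &plat };

	/* call sites shrink from
	 *	gk20a_get_platform(g->dev)->virt_handle
	 * to
	 *	vgpu_get_handle(g)
	 */
	printf("handle = 0x%llx\n", (unsigned long long)vgpu_get_handle(&g));
	return 0;
}

The point of the indirection is that struct gk20a_platform carries only
an opaque void *vgpu_priv behind CONFIG_TEGRA_GR_VIRTUALIZATION, so
common platform code needs no vgpu-specific types; vgpu.h is the only
place that casts it back.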
 drivers/gpu/nvgpu/gk20a/platform_gk20a.h     |  3 +-
 drivers/gpu/nvgpu/vgpu/dbg_vgpu.c            |  6 +-
 drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.c     |  8 +--
 drivers/gpu/nvgpu/vgpu/fifo_vgpu.c           | 48 +++++----------
 drivers/gpu/nvgpu/vgpu/gk20a/vgpu_gr_gk20a.c |  3 +-
 drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.c |  3 +-
 drivers/gpu/nvgpu/vgpu/gr_vgpu.c             | 89 ++++++++++------------------
 drivers/gpu/nvgpu/vgpu/ltc_vgpu.c            | 15 ++---
 drivers/gpu/nvgpu/vgpu/mm_vgpu.c             | 36 ++++-------
 drivers/gpu/nvgpu/vgpu/tsg_vgpu.c            |  9 +--
 drivers/gpu/nvgpu/vgpu/vgpu.c                | 30 +++++-----
 drivers/gpu/nvgpu/vgpu/vgpu.h                | 36 +++++++++++
 12 files changed, 130 insertions(+), 156 deletions(-)

diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
index 396d8db2..93158cc7 100644
--- a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
@@ -204,8 +204,7 @@ struct gk20a_platform {
 	bool virtual_dev;
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
-	u64 virt_handle;
-	struct task_struct *intr_handler;
+	void *vgpu_priv;
 #endif
 	/* source frequency for ptimer in hz */
 	u32 ptimer_src_freq;
diff --git a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
index 4e4379f7..c312c419 100644
--- a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
@@ -27,7 +27,6 @@ static int vgpu_exec_regops(struct dbg_session_gk20a *dbg_s,
 			u64 num_ops)
 {
 	struct channel_gk20a *ch;
-	struct gk20a_platform *platform = gk20a_get_platform(dbg_s->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_reg_ops_params *p = &msg.params.reg_ops;
 	void *oob;
@@ -54,7 +53,7 @@ static int vgpu_exec_regops(struct dbg_session_gk20a *dbg_s,
 	memcpy(oob, ops, ops_size);
 
 	msg.cmd = TEGRA_VGPU_CMD_REG_OPS;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(dbg_s->g);
 	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
 	p->handle = ch ? ch->virt_ctx : 0;
 	p->num_ops = num_ops;
@@ -71,7 +70,6 @@ fail:
 
 static int vgpu_dbg_set_powergate(struct dbg_session_gk20a *dbg_s, __u32 mode)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(dbg_s->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_set_powergate_params *p = &msg.params.set_powergate;
 	int err = 0;
@@ -95,7 +93,7 @@ static int vgpu_dbg_set_powergate(struct dbg_session_gk20a *dbg_s, __u32 mode)
 	}
 
 	msg.cmd = TEGRA_VGPU_CMD_SET_POWERGATE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(dbg_s->g);
 	p->mode = mode;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
diff --git a/drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.c b/drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.c
index bd1a7451..634932b7 100644
--- a/drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.c
@@ -106,7 +106,7 @@ static int vgpu_fecs_trace_enable(struct gk20a *g)
 {
 	struct tegra_vgpu_cmd_msg msg = {
 		.cmd = TEGRA_VGPU_CMD_FECS_TRACE_ENABLE,
-		.handle = gk20a_get_platform(g->dev)->virt_handle,
+		.handle = vgpu_get_handle(g),
 	};
 	int err;
 
@@ -120,7 +120,7 @@ static int vgpu_fecs_trace_disable(struct gk20a *g)
 {
 	struct tegra_vgpu_cmd_msg msg = {
 		.cmd = TEGRA_VGPU_CMD_FECS_TRACE_DISABLE,
-		.handle = gk20a_get_platform(g->dev)->virt_handle,
+		.handle = vgpu_get_handle(g),
 	};
 	int err;
 
@@ -134,7 +134,7 @@ static int vgpu_fecs_trace_poll(struct gk20a *g)
 {
 	struct tegra_vgpu_cmd_msg msg = {
 		.cmd = TEGRA_VGPU_CMD_FECS_TRACE_POLL,
-		.handle = gk20a_get_platform(g->dev)->virt_handle,
+		.handle = vgpu_get_handle(g),
 	};
 	int err;
 
@@ -190,7 +190,7 @@ static int vgpu_fecs_trace_set_filter(struct gk20a *g,
 {
 	struct tegra_vgpu_cmd_msg msg = {
 		.cmd = TEGRA_VGPU_CMD_FECS_TRACE_SET_FILTER,
-		.handle = gk20a_get_platform(g->dev)->virt_handle,
+		.handle = vgpu_get_handle(g),
 	};
 	struct tegra_vgpu_fecs_trace_filter *p = &msg.params.fecs_trace_filter;
 	int err;
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 8d3a5e9f..baab42c8 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -23,7 +23,6 @@
 static void vgpu_channel_bind(struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
@@ -32,7 +31,7 @@ static void vgpu_channel_bind(struct channel_gk20a *ch)
 	gk20a_dbg_info("bind channel %d", ch->hw_chid);
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -42,7 +41,6 @@ static void vgpu_channel_bind(struct channel_gk20a *ch)
 
 static void vgpu_channel_unbind(struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 
 	gk20a_dbg_fn("");
 
@@ -53,7 +51,7 @@ static void vgpu_channel_unbind(struct channel_gk20a *ch)
 		int err;
 
 		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_UNBIND;
-		msg.handle = platform->virt_handle;
+		msg.handle = vgpu_get_handle(ch->g);
 		p->handle = ch->virt_ctx;
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
@@ -64,7 +62,6 @@ static void vgpu_channel_unbind(struct channel_gk20a *ch)
 
 static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
 	int err;
@@ -72,7 +69,7 @@ static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->id = ch->hw_chid;
 	p->pid = (u64)current->tgid;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -88,7 +85,6 @@ static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 
 static void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
 	int err;
@@ -96,7 +92,7 @@ static void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWCTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -104,7 +100,6 @@ static void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
 
 static void vgpu_channel_enable(struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
@@ -113,7 +108,7 @@ static void vgpu_channel_enable(struct channel_gk20a *ch)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -121,7 +116,6 @@ static void vgpu_channel_enable(struct channel_gk20a *ch)
 
 static void vgpu_channel_disable(struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
@@ -130,7 +124,7 @@ static void vgpu_channel_disable(struct channel_gk20a *ch)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -139,7 +133,6 @@ static void vgpu_channel_disable(struct channel_gk20a *ch)
 static int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
 				u32 gpfifo_entries, u32 flags)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct device __maybe_unused *d = dev_from_gk20a(ch->g);
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(d);
 	struct tegra_vgpu_cmd_msg msg;
@@ -149,7 +142,7 @@ static int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SETUP_RAMFC;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	p->gpfifo_va = gpfifo_base;
 	p->num_entries = gpfifo_entries;
@@ -242,7 +235,6 @@ clean_up_runlist:
 
 static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct fifo_gk20a *f = &g->fifo;
 	struct device *d = dev_from_gk20a(g);
 	int chid, err = 0;
@@ -256,7 +248,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 
 	f->g = g;
 
-	err = vgpu_get_attribute(platform->virt_handle,
+	err = vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_NUM_CHANNELS,
 			&f->num_channels);
 	if (err)
@@ -411,7 +403,6 @@ int vgpu_init_fifo_support(struct gk20a *g)
 
 static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct fifo_gk20a *f = &g->fifo;
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
@@ -421,7 +412,7 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_PREEMPT;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = f->channel[hw_chid].virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -436,7 +427,6 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 
 static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_tsg_preempt_params *p =
 			&msg.params.tsg_preempt;
@@ -445,7 +435,7 @@ static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->tsg_id = tsgid;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
@@ -490,7 +480,6 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 					u32 hw_chid, bool add,
 					bool wait_for_finish)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct fifo_gk20a *f = &g->fifo;
 	struct fifo_runlist_info_gk20a *runlist;
 	u16 *runlist_entry = NULL;
@@ -529,7 +518,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 	} else	/* suspend to remove all channels */
 		count = 0;
 
-	return vgpu_submit_runlist(platform->virt_handle, runlist_id,
+	return vgpu_submit_runlist(vgpu_get_handle(g), runlist_id,
 				runlist->mem[0].cpu_va, count);
 }
 
@@ -566,7 +555,6 @@ static int vgpu_fifo_wait_engine_idle(struct gk20a *g)
 
 static int vgpu_channel_set_priority(struct channel_gk20a *ch, u32 priority)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_priority_params *p =
 			&msg.params.channel_priority;
@@ -575,7 +563,7 @@ static int vgpu_channel_set_priority(struct channel_gk20a *ch, u32 priority)
 	gk20a_dbg_info("channel %d set priority %u", ch->hw_chid, priority);
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_PRIORITY;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	p->priority = priority;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -589,7 +577,6 @@ static int vgpu_fifo_tsg_set_runlist_interleave(struct gk20a *g,
 					u32 runlist_id,
 					u32 new_level)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_tsg_runlist_interleave_params *p =
 				&msg.params.tsg_interleave;
@@ -598,7 +585,7 @@ static int vgpu_fifo_tsg_set_runlist_interleave(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->tsg_id = tsgid;
 	p->level = new_level;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -612,7 +599,6 @@ static int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
 					u32 runlist_id,
 					u32 new_level)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_runlist_interleave_params *p =
 				&msg.params.channel_interleave;
@@ -627,7 +613,7 @@ static int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
 	ch = &g->fifo.channel[id];
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_RUNLIST_INTERLEAVE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	p->level = new_level;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -637,7 +623,6 @@ static int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
 
 static int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_timeslice_params *p =
 			&msg.params.channel_timeslice;
@@ -646,7 +631,7 @@ static int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_TIMESLICE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	p->timeslice_us = timeslice;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -659,7 +644,6 @@ static int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch, bool verbose)
 	struct tsg_gk20a *tsg = NULL;
 	struct channel_gk20a *ch_tsg = NULL;
 	struct gk20a *g = ch->g;
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
@@ -687,7 +671,7 @@ static int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch, bool verbose)
 	}
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FORCE_RESET;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
diff --git a/drivers/gpu/nvgpu/vgpu/gk20a/vgpu_gr_gk20a.c b/drivers/gpu/nvgpu/vgpu/gk20a/vgpu_gr_gk20a.c
index 2cfe16da..a8526457 100644
--- a/drivers/gpu/nvgpu/vgpu/gk20a/vgpu_gr_gk20a.c
+++ b/drivers/gpu/nvgpu/vgpu/gk20a/vgpu_gr_gk20a.c
@@ -20,12 +20,11 @@
 
 static void vgpu_gk20a_detect_sm_arch(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 v = 0, raw_version, version = 0;
 
 	gk20a_dbg_fn("");
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_GPC0_TPC0_SM_ARCH, &v))
 		gk20a_err(dev_from_gk20a(g), "failed to retrieve SM arch");
 
diff --git a/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.c b/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.c
index fb1f31d8..aaddd218 100644
--- a/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.c
+++ b/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.c
@@ -20,12 +20,11 @@
 
 static void vgpu_gm20b_detect_sm_arch(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 v = 0;
 
 	gk20a_dbg_fn("");
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_GPC0_TPC0_SM_ARCH, &v))
 		gk20a_err(dev_from_gk20a(g), "failed to retrieve SM arch");
 
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index f395ac1e..5477bca0 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -19,7 +19,6 @@
 
 static int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
@@ -27,7 +26,7 @@ static int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_CTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(c->g);
 	p->handle = c->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
@@ -37,7 +36,6 @@ static int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
 					struct channel_gk20a *c, bool patch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
@@ -45,7 +43,7 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
gk20a_dbg_fn(""); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_GLOBAL_CTX; - msg.handle = platform->virt_handle; + msg.handle = vgpu_get_handle(g); p->handle = c->virt_ctx; err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); @@ -56,7 +54,6 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g, static int vgpu_gr_load_golden_ctx_image(struct gk20a *g, struct channel_gk20a *c) { - struct gk20a_platform *platform = gk20a_get_platform(g->dev); struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; int err; @@ -64,7 +61,7 @@ static int vgpu_gr_load_golden_ctx_image(struct gk20a *g, gk20a_dbg_fn(""); msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX; - msg.handle = platform->virt_handle; + msg.handle = vgpu_get_handle(g); p->handle = c->virt_ctx; err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); @@ -73,15 +70,14 @@ static int vgpu_gr_load_golden_ctx_image(struct gk20a *g, int vgpu_gr_init_ctx_state(struct gk20a *g) { - struct gk20a_platform *platform = gk20a_get_platform(g->dev); struct gr_gk20a *gr = &g->gr; gk20a_dbg_fn(""); - vgpu_get_attribute(platform->virt_handle, + vgpu_get_attribute(vgpu_get_handle(g), TEGRA_VGPU_ATTRIB_GOLDEN_CTX_SIZE, &g->gr.ctx_vars.golden_image_size); - vgpu_get_attribute(platform->virt_handle, + vgpu_get_attribute(vgpu_get_handle(g), TEGRA_VGPU_ATTRIB_ZCULL_CTX_SIZE, &g->gr.ctx_vars.zcull_ctxsw_image_size); if (!g->gr.ctx_vars.golden_image_size || @@ -128,7 +124,6 @@ static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g) static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, struct channel_gk20a *c) { - struct gk20a_platform *platform = gk20a_get_platform(g->dev); struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; struct vm_gk20a *ch_vm = c->vm; @@ -183,7 +178,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size; msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX; - msg.handle = platform->virt_handle; + msg.handle = vgpu_get_handle(g); p->handle = c->virt_ctx; p->cb_va = g_bfr_va[CIRCULAR_VA]; p->attr_va = g_bfr_va[ATTRIBUTE_VA]; @@ -209,7 +204,6 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, static void vgpu_gr_unmap_global_ctx_buffers(struct channel_gk20a *c) { - struct gk20a_platform *platform = gk20a_get_platform(c->g->dev); struct vm_gk20a *ch_vm = c->vm; u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va; u64 *g_bfr_size = c->ch_ctx.global_ctx_buffer_size; @@ -223,7 +217,7 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct channel_gk20a *c) int err; msg.cmd = TEGRA_VGPU_CMD_CHANNEL_UNMAP_GR_GLOBAL_CTX; - msg.handle = platform->virt_handle; + msg.handle = vgpu_get_handle(c->g); p->handle = c->virt_ctx; err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); WARN_ON(err || msg.ret); @@ -245,7 +239,6 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g, u32 class, u32 flags) { - struct gk20a_platform *platform = gk20a_get_platform(g->dev); struct tegra_vgpu_cmd_msg msg = {0}; struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx; struct gr_gk20a *gr = &g->gr; @@ -276,7 +269,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g, } msg.cmd = TEGRA_VGPU_CMD_GR_CTX_ALLOC; - msg.handle = platform->virt_handle; + msg.handle = vgpu_get_handle(g); p->as_handle = vm->handle; p->gr_ctx_va = gr_ctx->mem.gpu_va; p->class_num = class; @@ -302,13 +295,12 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm, gk20a_dbg_fn(""); if (gr_ctx && gr_ctx->mem.gpu_va) { - struct gk20a_platform *platform = 
-		struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 		struct tegra_vgpu_cmd_msg msg;
 		struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
 		int err;
 
 		msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
-		msg.handle = platform->virt_handle;
+		msg.handle = vgpu_get_handle(g);
 		p->gr_ctx_handle = gr_ctx->virt_ctx;
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
@@ -329,7 +321,6 @@ static void vgpu_gr_free_channel_gr_ctx(struct channel_gk20a *c)
 static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 					struct channel_gk20a *c)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct patch_desc *patch_ctx = &c->ch_ctx.patch_ctx;
 	struct vm_gk20a *ch_vm = c->vm;
 	struct tegra_vgpu_cmd_msg msg;
@@ -346,7 +337,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 		return -ENOMEM;
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_PATCH_CTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = c->virt_ctx;
 	p->patch_ctx_va = patch_ctx->mem.gpu_va;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -361,7 +352,6 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 
 static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
 	struct patch_desc *patch_ctx = &c->ch_ctx.patch_ctx;
 	struct vm_gk20a *ch_vm = c->vm;
 
@@ -373,7 +363,7 @@ static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
 		int err;
 
 		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_GR_PATCH_CTX;
-		msg.handle = platform->virt_handle;
+		msg.handle = vgpu_get_handle(c->g);
 		p->handle = c->virt_ctx;
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
@@ -386,7 +376,6 @@ static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
 
 static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_free_hwpm_ctx *p = &msg.params.free_hwpm_ctx;
 	struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
@@ -399,7 +388,7 @@ static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
 		return;
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWPM_CTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(c->g);
 	p->handle = c->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -427,7 +416,6 @@ static void vgpu_gr_free_channel_ctx(struct channel_gk20a *c)
 
 static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
 	struct gr_ctx_desc *gr_ctx = c->ch_ctx.gr_ctx;
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_channel_bind_gr_ctx_params *p =
@@ -435,7 +423,7 @@ static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
 	int err;
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(c->g);
 	p->ch_handle = c->virt_ctx;
 	p->gr_ctx_handle = gr_ctx->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -447,7 +435,6 @@ static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
 
 static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(tsg->g->dev);
 	struct gr_ctx_desc *gr_ctx = tsg->tsg_gr_ctx;
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_tsg_bind_gr_ctx_params *p =
@@ -455,7 +442,7 @@ static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
 	int err;
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_GR_CTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(tsg->g);
 	p->tsg_id = tsg->tsgid;
 	p->gr_ctx_handle = gr_ctx->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -621,12 +608,11 @@ static int vgpu_gr_free_obj_ctx(struct channel_gk20a *c,
 
 static u32 vgpu_gr_get_gpc_tpc_count(struct gk20a *g, u32 gpc_index)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 data;
 
 	WARN_ON(gpc_index > 0);
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_GPC0_TPC_COUNT, &data))
 		gk20a_err(dev_from_gk20a(g), "failed to retrieve gpc0_tpc_count");
 	return data;
@@ -634,26 +620,25 @@ static u32 vgpu_gr_get_gpc_tpc_count(struct gk20a *g, u32 gpc_index)
 
 static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 gpc_index;
 
 	gk20a_dbg_fn("");
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_GPC_COUNT, &gr->gpc_count))
 		return -ENOMEM;
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_MAX_TPC_PER_GPC_COUNT,
 			&gr->max_tpc_per_gpc_count))
 		return -ENOMEM;
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_MAX_TPC_COUNT,
 			&gr->max_tpc_count))
 		return -ENOMEM;
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_TPC_COUNT,
 			&gr->tpc_count))
 		return -ENOMEM;
@@ -701,7 +686,6 @@ static int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
 			struct channel_gk20a *c, u64 zcull_va,
 			u32 mode)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_zcull_bind_params *p = &msg.params.zcull_bind;
 	int err;
@@ -709,7 +693,7 @@ static int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_ZCULL;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = c->virt_ctx;
 	p->zcull_va = zcull_va;
 	p->mode = mode;
@@ -721,7 +705,6 @@ static int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
 static int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
 				struct gr_zcull_info *zcull_params)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_zcull_info_params *p = &msg.params.zcull_info;
 	int err;
@@ -729,7 +712,7 @@ static int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_GET_ZCULL_INFO;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret)
 		return -ENOMEM;
@@ -752,12 +735,11 @@ static int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
 
 static u32 vgpu_gr_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 data;
 
 	WARN_ON(gpc_index > 0);
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_GPC0_TPC_MASK, &data))
 		gk20a_err(dev_from_gk20a(g),
 			"failed to retrieve gpc0_tpc_mask");
 
@@ -766,12 +748,11 @@ static u32 vgpu_gr_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
 
 static u32 vgpu_gr_get_max_fbps_count(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
0; gk20a_dbg_fn(""); - if (vgpu_get_attribute(platform->virt_handle, + if (vgpu_get_attribute(vgpu_get_handle(g), TEGRA_VGPU_ATTRIB_NUM_FBPS, &max_fbps_count)) gk20a_err(dev_from_gk20a(g), "failed to retrieve num fbps"); @@ -780,12 +761,11 @@ static u32 vgpu_gr_get_max_fbps_count(struct gk20a *g) static u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g) { - struct gk20a_platform *platform = gk20a_get_platform(g->dev); u32 fbp_en_mask = 0; gk20a_dbg_fn(""); - if (vgpu_get_attribute(platform->virt_handle, + if (vgpu_get_attribute(vgpu_get_handle(g), TEGRA_VGPU_ATTRIB_FBP_EN_MASK, &fbp_en_mask)) gk20a_err(dev_from_gk20a(g), "failed to retrieve fbp en mask"); @@ -794,12 +774,11 @@ static u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g) static u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g) { - struct gk20a_platform *platform = gk20a_get_platform(g->dev); u32 val = 0; gk20a_dbg_fn(""); - if (vgpu_get_attribute(platform->virt_handle, + if (vgpu_get_attribute(vgpu_get_handle(g), TEGRA_VGPU_ATTRIB_MAX_LTC_PER_FBP, &val)) gk20a_err(dev_from_gk20a(g), "failed to retrieve max ltc per fbp"); @@ -808,12 +787,11 @@ static u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g) static u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g) { - struct gk20a_platform *platform = gk20a_get_platform(g->dev); u32 val = 0; gk20a_dbg_fn(""); - if (vgpu_get_attribute(platform->virt_handle, + if (vgpu_get_attribute(vgpu_get_handle(g), TEGRA_VGPU_ATTRIB_MAX_LTS_PER_LTC, &val)) gk20a_err(dev_from_gk20a(g), "failed to retrieve lts per ltc"); @@ -829,7 +807,6 @@ static u32 *vgpu_gr_rop_l2_en_mask(struct gk20a *g) static int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr, struct zbc_entry *zbc_val) { - struct gk20a_platform *platform = gk20a_get_platform(g->dev); struct tegra_vgpu_cmd_msg msg = {0}; struct tegra_vgpu_zbc_set_table_params *p = &msg.params.zbc_set_table; int err; @@ -837,7 +814,7 @@ static int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr, gk20a_dbg_fn(""); msg.cmd = TEGRA_VGPU_CMD_ZBC_SET_TABLE; - msg.handle = platform->virt_handle; + msg.handle = vgpu_get_handle(g); p->type = zbc_val->type; p->format = zbc_val->format; @@ -861,7 +838,6 @@ static int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr, static int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr, struct zbc_query_params *query_params) { - struct gk20a_platform *platform = gk20a_get_platform(g->dev); struct tegra_vgpu_cmd_msg msg = {0}; struct tegra_vgpu_zbc_query_table_params *p = &msg.params.zbc_query_table; @@ -870,7 +846,7 @@ static int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr, gk20a_dbg_fn(""); msg.cmd = TEGRA_VGPU_CMD_ZBC_QUERY_TABLE; - msg.handle = platform->virt_handle; + msg.handle = vgpu_get_handle(g); p->type = query_params->type; p->index_size = query_params->index_size; @@ -1048,7 +1024,6 @@ int vgpu_gr_nonstall_isr(struct gk20a *g, static int vgpu_gr_set_sm_debug_mode(struct gk20a *g, struct channel_gk20a *ch, u64 sms, bool enable) { - struct gk20a_platform *platform = gk20a_get_platform(g->dev); struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_sm_debug_mode *p = &msg.params.sm_debug_mode; int err; @@ -1056,7 +1031,7 @@ static int vgpu_gr_set_sm_debug_mode(struct gk20a *g, gk20a_dbg_fn(""); msg.cmd = TEGRA_VGPU_CMD_SET_SM_DEBUG_MODE; - msg.handle = platform->virt_handle; + msg.handle = vgpu_get_handle(g); p->handle = ch->virt_ctx; p->sms = sms; p->enable = (u32)enable; @@ -1069,7 +1044,6 @@ static int vgpu_gr_set_sm_debug_mode(struct gk20a *g, static int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g, struct 
 					  struct channel_gk20a *ch, bool enable)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
 	int err;
@@ -1077,7 +1051,7 @@ static int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_SMPC_CTXSW_MODE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = ch->virt_ctx;
 
 	if (enable)
@@ -1094,7 +1068,6 @@ static int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
 static int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 					  struct channel_gk20a *ch, bool enable)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
 	int err;
@@ -1102,7 +1075,7 @@ static int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = ch->virt_ctx;
 
 	/* If we just enabled HWPM context switching, flag this
diff --git a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c b/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c
index 1adb8b22..3e3f67c6 100644
--- a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c
@@ -17,12 +17,11 @@
 
 static int vgpu_determine_L2_size_bytes(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 cache_size = 0;
 
 	gk20a_dbg_fn("");
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_L2_SIZE, &cache_size))
 		dev_err(dev_from_gk20a(g), "unable to get L2 size\n");
 
@@ -31,22 +30,21 @@ static int vgpu_determine_L2_size_bytes(struct gk20a *g)
 
 static int vgpu_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 max_comptag_lines = 0;
 	int err;
 
 	gk20a_dbg_fn("");
 
-	err = vgpu_get_attribute(platform->virt_handle,
+	err = vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_CACHELINE_SIZE, &gr->cacheline_size);
-	err |= vgpu_get_attribute(platform->virt_handle,
+	err |= vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_COMPTAGS_PER_CACHELINE,
 			&gr->comptags_per_cacheline);
-	err |= vgpu_get_attribute(platform->virt_handle,
+	err |= vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_SLICES_PER_LTC, &gr->slices_per_ltc);
-	err |= vgpu_get_attribute(platform->virt_handle,
+	err |= vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_COMPTAG_LINES, &max_comptag_lines);
 	if (err) {
 		dev_err(dev_from_gk20a(g), "failed to get ctags atributes\n");
@@ -65,13 +63,12 @@ static int vgpu_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 
 static void vgpu_ltc_init_fs_state(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 ltc_count = 0;
 	int err;
 
 	gk20a_dbg_fn("");
 
-	err = vgpu_get_attribute(platform->virt_handle,
+	err = vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_LTC_COUNT, &ltc_count);
 	WARN_ON(err);
 	g->ltc_count = ltc_count;
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 6b741cd4..b256598f 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -1,7 +1,7 @@
 /*
  * Virtualized GPU Memory Management
  *
- * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -86,7 +86,6 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 	int err = 0;
 	struct device *d = dev_from_vm(vm);
 	struct gk20a *g = gk20a_from_vm(vm);
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(d);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
@@ -114,7 +113,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 		prot = TEGRA_VGPU_MAP_PROT_NONE;
 
 	msg.cmd = TEGRA_VGPU_CMD_AS_MAP;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = vm->handle;
 	p->addr = addr;
 	p->gpu_va = map_offset;
@@ -164,7 +163,6 @@ static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 			struct vm_gk20a_mapping_batch *batch)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
 	int err;
@@ -181,7 +179,7 @@ static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 	}
 
 	msg.cmd = TEGRA_VGPU_CMD_AS_UNMAP;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = vm->handle;
 	p->gpu_va = vaddr;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -195,7 +193,6 @@ static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 {
 	struct gk20a *g = vm->mm->g;
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct mapped_buffer_node *mapped_buffer;
 	struct vm_reserved_va_node *va_node, *va_node_tmp;
 	struct tegra_vgpu_cmd_msg msg;
@@ -225,7 +222,7 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 	}
 
 	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = vm->handle;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -244,7 +241,6 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 
 u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct dma_iommu_mapping *mapping =
 			to_dma_iommu_mapping(dev_from_gk20a(g));
 	u64 addr = g->ops.mm.get_iova_addr(g, (*sgt)->sgl, 0);
@@ -253,7 +249,7 @@ u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size)
 	int err;
 
 	msg.cmd = TEGRA_VGPU_CMD_MAP_BAR1;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->addr = addr;
 	p->size = size;
 	p->iova = mapping ? 1 : 0;
@@ -320,7 +316,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	vm->va_limit = mm->channel.user_size + mm->channel.kernel_size;
 
 	msg.cmd = TEGRA_VGPU_CMD_AS_ALLOC_SHARE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->size = vm->va_limit;
 	p->big_page_size = vm->big_page_size;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -434,7 +430,7 @@ clean_up_small_allocator:
 	gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]);
 clean_up_share:
 	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = vm->handle;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -448,7 +444,6 @@ static int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
 				struct channel_gk20a *ch)
 {
 	struct vm_gk20a *vm = as_share->vm;
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share;
 	int err;
@@ -457,7 +452,7 @@ static int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
 	ch->vm = vm;
 	msg.cmd = TEGRA_VGPU_CMD_AS_BIND_SHARE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->as_handle = vm->handle;
 	p->chan_handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -485,26 +480,23 @@ static void vgpu_cache_maint(u64 handle, u8 op)
 
 static int vgpu_mm_fb_flush(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 
 	gk20a_dbg_fn("");
 
-	vgpu_cache_maint(platform->virt_handle, TEGRA_VGPU_FB_FLUSH);
+	vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH);
 	return 0;
 }
 
 static void vgpu_mm_l2_invalidate(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 
 	gk20a_dbg_fn("");
 
-	vgpu_cache_maint(platform->virt_handle, TEGRA_VGPU_L2_MAINT_INV);
+	vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV);
 }
 
 static void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u8 op;
 
 	gk20a_dbg_fn("");
@@ -514,13 +506,12 @@ static void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
 	else
 		op = TEGRA_VGPU_L2_MAINT_FLUSH;
 
-	vgpu_cache_maint(platform->virt_handle, op);
+	vgpu_cache_maint(vgpu_get_handle(g), op);
 }
 
 static void vgpu_mm_tlb_invalidate(struct vm_gk20a *vm)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_invalidate_params *p = &msg.params.as_invalidate;
 	int err;
@@ -528,7 +519,7 @@ static void vgpu_mm_tlb_invalidate(struct vm_gk20a *vm)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_AS_INVALIDATE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = vm->handle;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -536,7 +527,6 @@ static void vgpu_mm_tlb_invalidate(struct vm_gk20a *vm)
 
 static void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode;
 	int err;
@@ -544,7 +534,7 @@ static void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->enable = (u32)enable;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
diff --git a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
index 820a3db3..2033fd7a 100644
--- a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
@@ -26,7 +26,6 @@ static int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
 		struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(tsg->g->dev);
 	struct tegra_vgpu_cmd_msg msg = {};
 	struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
 				&msg.params.tsg_bind_unbind_channel;
@@ -39,7 +38,7 @@ static int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
 		return err;
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_CHANNEL;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(tsg->g);
 	p->tsg_id = tsg->tsgid;
 	p->ch_handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -56,7 +55,6 @@ static int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
 
 static int vgpu_tsg_unbind_channel(struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg = {};
 	struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
 				&msg.params.tsg_bind_unbind_channel;
@@ -69,7 +67,7 @@ static int vgpu_tsg_unbind_channel(struct channel_gk20a *ch)
 		return err;
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_UNBIND_CHANNEL;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->ch_handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
@@ -80,7 +78,6 @@ static int vgpu_tsg_unbind_channel(struct channel_gk20a *ch)
 
 static int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(tsg->g->dev);
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_tsg_timeslice_params *p =
 				&msg.params.tsg_timeslice;
@@ -89,7 +86,7 @@ static int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_SET_TIMESLICE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(tsg->g);
 	p->tsg_id = tsg->tsgid;
 	p->timeslice_us = timeslice;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
index c03086e1..300ffc98 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.c
@@ -187,7 +187,7 @@ static int vgpu_intr_thread(void *dev_id)
 static void vgpu_remove_support(struct device *dev)
 {
 	struct gk20a *g = get_gk20a(dev);
-	struct gk20a_platform *platform = gk20a_get_platform(dev);
+	struct vgpu_priv_data *priv = vgpu_get_priv_data_from_dev(dev);
 	struct tegra_vgpu_intr_msg msg;
 	int err;
 
@@ -208,7 +208,7 @@ static void vgpu_remove_support(struct device *dev)
 			TEGRA_GR_COMM_ID_SELF, TEGRA_VGPU_QUEUE_INTR,
 			&msg, sizeof(msg));
 	WARN_ON(err);
-	kthread_stop(platform->intr_handler);
+	kthread_stop(priv->intr_handler);
 
 	/* free mappings to registers, etc*/
 
@@ -271,11 +271,10 @@ int vgpu_pm_prepare_poweroff(struct device *dev)
 static void vgpu_detect_chip(struct gk20a *g)
 {
 	struct nvgpu_gpu_characteristics *gpu = &g->gpu_characteristics;
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 
 	u32 mc_boot_0_value;
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_PMC_BOOT_0,
 			&mc_boot_0_value)) {
 		gk20a_err(dev_from_gk20a(g), "failed to detect chip");
@@ -297,7 +296,6 @@ static void vgpu_detect_chip(struct gk20a *g)
 
 static int vgpu_init_gpu_characteristics(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 max_freq;
 	int err;
@@ -307,7 +305,7 @@ static int vgpu_init_gpu_characteristics(struct gk20a *g)
 	if (err)
 		return err;
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_MAX_FREQ, &max_freq))
 		return -ENOMEM;
 
@@ -318,7 +316,6 @@ static int vgpu_init_gpu_characteristics(struct gk20a *g)
 
 static int vgpu_read_ptimer(struct gk20a *g, u64 *value)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_read_ptimer_params *p = &msg.params.read_ptimer;
 	int err;
@@ -326,7 +323,7 @@ static int vgpu_read_ptimer(struct gk20a *g, u64 *value)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_READ_PTIMER;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
@@ -441,7 +438,6 @@ static int vgpu_qos_notify(struct notifier_block *nb,
 	struct gk20a_scale_profile *profile =
 			container_of(nb, struct gk20a_scale_profile,
 			qos_notify_block);
-	struct gk20a_platform *platform = gk20a_get_platform(profile->dev);
 	struct tegra_vgpu_cmd_msg msg = {};
 	struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
 	u32 max_freq;
@@ -452,7 +448,7 @@ static int vgpu_qos_notify(struct notifier_block *nb,
 	max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS);
 
 	msg.cmd = TEGRA_VGPU_CMD_SET_GPU_CLK_RATE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle_from_dev(profile->dev);
 	p->rate = max_freq;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
@@ -510,6 +506,7 @@ int vgpu_probe(struct platform_device *pdev)
 	int err;
 	struct device *dev = &pdev->dev;
 	struct gk20a_platform *platform = gk20a_get_platform(dev);
+	struct vgpu_priv_data *priv;
 
 	if (!platform) {
 		dev_err(dev, "no platform data\n");
@@ -518,6 +515,10 @@ int vgpu_probe(struct platform_device *pdev)
 
 	gk20a_dbg_fn("");
 
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
 	gk20a = kzalloc(sizeof(struct gk20a), GFP_KERNEL);
 	if (!gk20a) {
 		dev_err(dev, "couldn't allocate gk20a support");
@@ -525,6 +526,7 @@ int vgpu_probe(struct platform_device *pdev)
 	}
 
 	platform->g = gk20a;
+	platform->vgpu_priv = priv;
 	gk20a->dev = dev;
 
 	err = gk20a_user_init(dev, INTERFACE_NAME, &nvgpu_class);
@@ -564,15 +566,15 @@ int vgpu_probe(struct platform_device *pdev)
 		return -ENOSYS;
 	}
 
-	platform->virt_handle = vgpu_connect();
-	if (!platform->virt_handle) {
+	priv->virt_handle = vgpu_connect();
+	if (!priv->virt_handle) {
 		dev_err(dev, "failed to connect to server node\n");
 		vgpu_comm_deinit();
 		return -ENOSYS;
 	}
 
-	platform->intr_handler = kthread_run(vgpu_intr_thread, gk20a, "gk20a");
-	if (IS_ERR(platform->intr_handler))
+	priv->intr_handler = kthread_run(vgpu_intr_thread, gk20a, "gk20a");
+	if (IS_ERR(priv->intr_handler))
 		return -ENOMEM;
 
 	gk20a_debug_init(dev);
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.h b/drivers/gpu/nvgpu/vgpu/vgpu.h
index f79c8aab..e1fff966 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.h
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.h
@@ -21,6 +21,42 @@
 #include "gk20a/gk20a.h"
 
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
+
+struct vgpu_priv_data {
+	u64 virt_handle;
+	struct task_struct *intr_handler;
+};
+
+static inline
+struct vgpu_priv_data *vgpu_get_priv_data_from_dev(struct device *dev)
+{
+	struct gk20a_platform *plat = gk20a_get_platform(dev);
+
+	return (struct vgpu_priv_data *)plat->vgpu_priv;
+}
+
+static inline struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g)
+{
+	return vgpu_get_priv_data_from_dev(g->dev);
+}
+
+static inline u64 vgpu_get_handle_from_dev(struct device *dev)
+{
+	struct vgpu_priv_data *priv = vgpu_get_priv_data_from_dev(dev);
+
+	if (unlikely(!priv)) {
+		dev_err(dev, "invalid vgpu_priv_data in %s\n", __func__);
+		return INT_MAX;
+	}
+
+	return priv->virt_handle;
+}
+
+static inline u64 vgpu_get_handle(struct gk20a *g)
+{
+	return vgpu_get_handle_from_dev(g->dev);
+}
+
 int vgpu_pm_prepare_poweroff(struct device *dev);
 int vgpu_pm_finalize_poweron(struct device *dev);
 int vgpu_probe(struct platform_device *dev);
-- 
cgit v1.2.2