author	Richard Zhao <rizhao@nvidia.com>	2016-07-21 19:51:40 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2016-08-15 14:41:16 -0400
commit	e1438818b90c5b0d73aae800b12bd6b36aec5142 (patch)
tree	f0582cda23552526c3067e90f4cb74b461d50d73
parent	33ff34887f560449828e79170a2a36a97496eeec (diff)
gpu: nvgpu: vgpu: add vgpu private data and helper functions
Move vgpu private data to a dedicated structure and allocate it
at probe time. Also add a virt_handle helper function, which is
used everywhere.

JIRA VFND-2103

Change-Id: I125911420be72ca9be948125d8357fa85d1d3afd
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: http://git-master/r/1185206
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vladislav Buzov <vbuzov@nvidia.com>
-rw-r--r--	drivers/gpu/nvgpu/gk20a/platform_gk20a.h	3
-rw-r--r--	drivers/gpu/nvgpu/vgpu/dbg_vgpu.c	6
-rw-r--r--	drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.c	8
-rw-r--r--	drivers/gpu/nvgpu/vgpu/fifo_vgpu.c	48
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gk20a/vgpu_gr_gk20a.c	3
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.c	3
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gr_vgpu.c	89
-rw-r--r--	drivers/gpu/nvgpu/vgpu/ltc_vgpu.c	15
-rw-r--r--	drivers/gpu/nvgpu/vgpu/mm_vgpu.c	36
-rw-r--r--	drivers/gpu/nvgpu/vgpu/tsg_vgpu.c	9
-rw-r--r--	drivers/gpu/nvgpu/vgpu/vgpu.c	30
-rw-r--r--	drivers/gpu/nvgpu/vgpu/vgpu.h	36
12 files changed, 130 insertions, 156 deletions
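Note: the vgpu.h and vgpu.c hunks that introduce the new private-data structure and helper are counted in the diffstat but are not shown in this excerpt. Below is a minimal sketch of the plumbing the commit message describes, reconstructed from the call sites in the diff that follows; vgpu_priv, virt_handle, intr_handler and vgpu_get_handle() appear in the diff itself, while the structure name, vgpu_get_priv_data() and the probe fragment are assumptions.

/*
 * Sketch only -- not the actual vgpu.h/vgpu.c hunks.
 * vgpu_priv, virt_handle, intr_handler and vgpu_get_handle() come from
 * the diff below; everything else here is an assumption.
 */
struct vgpu_priv_data {
	u64 virt_handle;			/* was gk20a_platform.virt_handle */
	struct task_struct *intr_handler;	/* was gk20a_platform.intr_handler */
};

static inline struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g)
{
	struct gk20a_platform *platform = gk20a_get_platform(g->dev);

	/* Set up once at probe time, valid for the life of the device. */
	return (struct vgpu_priv_data *)platform->vgpu_priv;
}

static inline u64 vgpu_get_handle(struct gk20a *g)
{
	return vgpu_get_priv_data(g)->virt_handle;
}

/* Hypothetical probe-time allocation, per the commit message: */
/*	platform->vgpu_priv = kzalloc(sizeof(struct vgpu_priv_data), GFP_KERNEL);
 *	if (!platform->vgpu_priv)
 *		return -ENOMEM;
 */

With this in place, every call site below collapses from a two-step lookup (fetch the platform struct, then dereference virt_handle) into a single vgpu_get_handle(g) or vgpu_get_handle(ch->g) expression.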
diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
index 396d8db2..93158cc7 100644
--- a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
@@ -204,8 +204,7 @@ struct gk20a_platform {
 
 	bool virtual_dev;
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
-	u64 virt_handle;
-	struct task_struct *intr_handler;
+	void *vgpu_priv;
 #endif
 	/* source frequency for ptimer in hz */
 	u32 ptimer_src_freq;
diff --git a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
index 4e4379f7..c312c419 100644
--- a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
@@ -27,7 +27,6 @@ static int vgpu_exec_regops(struct dbg_session_gk20a *dbg_s,
 			u64 num_ops)
 {
 	struct channel_gk20a *ch;
-	struct gk20a_platform *platform = gk20a_get_platform(dbg_s->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_reg_ops_params *p = &msg.params.reg_ops;
 	void *oob;
@@ -54,7 +53,7 @@ static int vgpu_exec_regops(struct dbg_session_gk20a *dbg_s,
 	memcpy(oob, ops, ops_size);
 
 	msg.cmd = TEGRA_VGPU_CMD_REG_OPS;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(dbg_s->g);
 	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
 	p->handle = ch ? ch->virt_ctx : 0;
 	p->num_ops = num_ops;
@@ -71,7 +70,6 @@ fail:
 
 static int vgpu_dbg_set_powergate(struct dbg_session_gk20a *dbg_s, __u32 mode)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(dbg_s->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_set_powergate_params *p = &msg.params.set_powergate;
 	int err = 0;
@@ -95,7 +93,7 @@ static int vgpu_dbg_set_powergate(struct dbg_session_gk20a *dbg_s, __u32 mode)
 	}
 
 	msg.cmd = TEGRA_VGPU_CMD_SET_POWERGATE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(dbg_s->g);
 	p->mode = mode;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
diff --git a/drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.c b/drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.c
index bd1a7451..634932b7 100644
--- a/drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fecs_trace_vgpu.c
@@ -106,7 +106,7 @@ static int vgpu_fecs_trace_enable(struct gk20a *g)
 {
 	struct tegra_vgpu_cmd_msg msg = {
 		.cmd = TEGRA_VGPU_CMD_FECS_TRACE_ENABLE,
-		.handle = gk20a_get_platform(g->dev)->virt_handle,
+		.handle = vgpu_get_handle(g),
 	};
 	int err;
 
@@ -120,7 +120,7 @@ static int vgpu_fecs_trace_disable(struct gk20a *g)
 {
 	struct tegra_vgpu_cmd_msg msg = {
 		.cmd = TEGRA_VGPU_CMD_FECS_TRACE_DISABLE,
-		.handle = gk20a_get_platform(g->dev)->virt_handle,
+		.handle = vgpu_get_handle(g),
 	};
 	int err;
 
@@ -134,7 +134,7 @@ static int vgpu_fecs_trace_poll(struct gk20a *g)
 {
 	struct tegra_vgpu_cmd_msg msg = {
 		.cmd = TEGRA_VGPU_CMD_FECS_TRACE_POLL,
-		.handle = gk20a_get_platform(g->dev)->virt_handle,
+		.handle = vgpu_get_handle(g),
 	};
 	int err;
 
@@ -190,7 +190,7 @@ static int vgpu_fecs_trace_set_filter(struct gk20a *g,
 {
 	struct tegra_vgpu_cmd_msg msg = {
 		.cmd = TEGRA_VGPU_CMD_FECS_TRACE_SET_FILTER,
-		.handle = gk20a_get_platform(g->dev)->virt_handle,
+		.handle = vgpu_get_handle(g),
 	};
 	struct tegra_vgpu_fecs_trace_filter *p = &msg.params.fecs_trace_filter;
 	int err;
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 8d3a5e9f..baab42c8 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -23,7 +23,6 @@
 
 static void vgpu_channel_bind(struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
@@ -32,7 +31,7 @@ static void vgpu_channel_bind(struct channel_gk20a *ch)
 	gk20a_dbg_info("bind channel %d", ch->hw_chid);
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -42,7 +41,6 @@ static void vgpu_channel_bind(struct channel_gk20a *ch)
 
 static void vgpu_channel_unbind(struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 
 	gk20a_dbg_fn("");
 
@@ -53,7 +51,7 @@ static void vgpu_channel_unbind(struct channel_gk20a *ch)
 		int err;
 
 		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_UNBIND;
-		msg.handle = platform->virt_handle;
+		msg.handle = vgpu_get_handle(ch->g);
 		p->handle = ch->virt_ctx;
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
@@ -64,7 +62,6 @@ static void vgpu_channel_unbind(struct channel_gk20a *ch)
 
 static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
 	int err;
@@ -72,7 +69,7 @@ static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->id = ch->hw_chid;
 	p->pid = (u64)current->tgid;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -88,7 +85,6 @@ static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 
 static void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
 	int err;
@@ -96,7 +92,7 @@ static void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWCTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -104,7 +100,6 @@ static void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
 
 static void vgpu_channel_enable(struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
@@ -113,7 +108,7 @@ static void vgpu_channel_enable(struct channel_gk20a *ch)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -121,7 +116,6 @@ static void vgpu_channel_enable(struct channel_gk20a *ch)
 
 static void vgpu_channel_disable(struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
@@ -130,7 +124,7 @@ static void vgpu_channel_disable(struct channel_gk20a *ch)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -139,7 +133,6 @@ static void vgpu_channel_disable(struct channel_gk20a *ch)
 static int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
 				u32 gpfifo_entries, u32 flags)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct device __maybe_unused *d = dev_from_gk20a(ch->g);
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(d);
 	struct tegra_vgpu_cmd_msg msg;
@@ -149,7 +142,7 @@ static int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SETUP_RAMFC;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	p->gpfifo_va = gpfifo_base;
 	p->num_entries = gpfifo_entries;
@@ -242,7 +235,6 @@ clean_up_runlist:
 
 static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct fifo_gk20a *f = &g->fifo;
 	struct device *d = dev_from_gk20a(g);
 	int chid, err = 0;
@@ -256,7 +248,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 
 	f->g = g;
 
-	err = vgpu_get_attribute(platform->virt_handle,
+	err = vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_NUM_CHANNELS,
 			&f->num_channels);
 	if (err)
@@ -411,7 +403,6 @@ int vgpu_init_fifo_support(struct gk20a *g)
 
 static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct fifo_gk20a *f = &g->fifo;
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
@@ -421,7 +412,7 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_PREEMPT;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = f->channel[hw_chid].virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
@@ -436,7 +427,6 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 
 static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_tsg_preempt_params *p =
 			&msg.params.tsg_preempt;
@@ -445,7 +435,7 @@ static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->tsg_id = tsgid;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
@@ -490,7 +480,6 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 					u32 hw_chid, bool add,
 					bool wait_for_finish)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct fifo_gk20a *f = &g->fifo;
 	struct fifo_runlist_info_gk20a *runlist;
 	u16 *runlist_entry = NULL;
@@ -529,7 +518,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 	} else /* suspend to remove all channels */
 		count = 0;
 
-	return vgpu_submit_runlist(platform->virt_handle, runlist_id,
+	return vgpu_submit_runlist(vgpu_get_handle(g), runlist_id,
 				runlist->mem[0].cpu_va, count);
 }
 
@@ -566,7 +555,6 @@ static int vgpu_fifo_wait_engine_idle(struct gk20a *g)
 
 static int vgpu_channel_set_priority(struct channel_gk20a *ch, u32 priority)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_priority_params *p =
 			&msg.params.channel_priority;
@@ -575,7 +563,7 @@ static int vgpu_channel_set_priority(struct channel_gk20a *ch, u32 priority)
 	gk20a_dbg_info("channel %d set priority %u", ch->hw_chid, priority);
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_PRIORITY;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	p->priority = priority;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -589,7 +577,6 @@ static int vgpu_fifo_tsg_set_runlist_interleave(struct gk20a *g,
 					u32 runlist_id,
 					u32 new_level)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_tsg_runlist_interleave_params *p =
 			&msg.params.tsg_interleave;
@@ -598,7 +585,7 @@ static int vgpu_fifo_tsg_set_runlist_interleave(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->tsg_id = tsgid;
 	p->level = new_level;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -612,7 +599,6 @@ static int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
 					u32 runlist_id,
 					u32 new_level)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_runlist_interleave_params *p =
 			&msg.params.channel_interleave;
@@ -627,7 +613,7 @@ static int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
 
 	ch = &g->fifo.channel[id];
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_RUNLIST_INTERLEAVE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	p->level = new_level;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -637,7 +623,6 @@ static int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
 
 static int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_timeslice_params *p =
 			&msg.params.channel_timeslice;
@@ -646,7 +631,7 @@ static int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_TIMESLICE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	p->timeslice_us = timeslice;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -659,7 +644,6 @@ static int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch, bool verbose)
 	struct tsg_gk20a *tsg = NULL;
 	struct channel_gk20a *ch_tsg = NULL;
 	struct gk20a *g = ch->g;
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_channel_config_params *p =
 			&msg.params.channel_config;
@@ -687,7 +671,7 @@ static int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch, bool verbose)
 	}
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FORCE_RESET;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
diff --git a/drivers/gpu/nvgpu/vgpu/gk20a/vgpu_gr_gk20a.c b/drivers/gpu/nvgpu/vgpu/gk20a/vgpu_gr_gk20a.c
index 2cfe16da..a8526457 100644
--- a/drivers/gpu/nvgpu/vgpu/gk20a/vgpu_gr_gk20a.c
+++ b/drivers/gpu/nvgpu/vgpu/gk20a/vgpu_gr_gk20a.c
@@ -20,12 +20,11 @@
 
 static void vgpu_gk20a_detect_sm_arch(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 v = 0, raw_version, version = 0;
 
 	gk20a_dbg_fn("");
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_GPC0_TPC0_SM_ARCH, &v))
 		gk20a_err(dev_from_gk20a(g), "failed to retrieve SM arch");
 
diff --git a/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.c b/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.c
index fb1f31d8..aaddd218 100644
--- a/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.c
+++ b/drivers/gpu/nvgpu/vgpu/gm20b/vgpu_gr_gm20b.c
@@ -20,12 +20,11 @@
 
 static void vgpu_gm20b_detect_sm_arch(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 v = 0;
 
 	gk20a_dbg_fn("");
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_GPC0_TPC0_SM_ARCH, &v))
 		gk20a_err(dev_from_gk20a(g), "failed to retrieve SM arch");
 
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index f395ac1e..5477bca0 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -19,7 +19,6 @@
 
 static int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
@@ -27,7 +26,7 @@ static int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_CTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(c->g);
 	p->handle = c->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
@@ -37,7 +36,6 @@ static int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
 					struct channel_gk20a *c, bool patch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
@@ -45,7 +43,7 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_GLOBAL_CTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = c->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
@@ -56,7 +54,6 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
 static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
 					struct channel_gk20a *c)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	int err;
@@ -64,7 +61,7 @@ static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = c->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
@@ -73,15 +70,14 @@ static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
 
 int vgpu_gr_init_ctx_state(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct gr_gk20a *gr = &g->gr;
 
 	gk20a_dbg_fn("");
 
-	vgpu_get_attribute(platform->virt_handle,
+	vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_GOLDEN_CTX_SIZE,
 			&g->gr.ctx_vars.golden_image_size);
-	vgpu_get_attribute(platform->virt_handle,
+	vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_ZCULL_CTX_SIZE,
 			&g->gr.ctx_vars.zcull_ctxsw_image_size);
 	if (!g->gr.ctx_vars.golden_image_size ||
@@ -128,7 +124,6 @@ static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
 static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 					struct channel_gk20a *c)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	struct vm_gk20a *ch_vm = c->vm;
@@ -183,7 +178,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 		gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size;
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = c->virt_ctx;
 	p->cb_va = g_bfr_va[CIRCULAR_VA];
 	p->attr_va = g_bfr_va[ATTRIBUTE_VA];
@@ -209,7 +204,6 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 
 static void vgpu_gr_unmap_global_ctx_buffers(struct channel_gk20a *c)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
 	struct vm_gk20a *ch_vm = c->vm;
 	u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
 	u64 *g_bfr_size = c->ch_ctx.global_ctx_buffer_size;
@@ -223,7 +217,7 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct channel_gk20a *c)
 		int err;
 
 		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_UNMAP_GR_GLOBAL_CTX;
-		msg.handle = platform->virt_handle;
+		msg.handle = vgpu_get_handle(c->g);
 		p->handle = c->virt_ctx;
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
@@ -245,7 +239,6 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 			u32 class,
 			u32 flags)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
 	struct gr_gk20a *gr = &g->gr;
@@ -276,7 +269,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 	}
 
 	msg.cmd = TEGRA_VGPU_CMD_GR_CTX_ALLOC;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->as_handle = vm->handle;
 	p->gr_ctx_va = gr_ctx->mem.gpu_va;
 	p->class_num = class;
@@ -302,13 +295,12 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 	gk20a_dbg_fn("");
 
 	if (gr_ctx && gr_ctx->mem.gpu_va) {
-		struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 		struct tegra_vgpu_cmd_msg msg;
 		struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
 		int err;
 
 		msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
-		msg.handle = platform->virt_handle;
+		msg.handle = vgpu_get_handle(g);
 		p->gr_ctx_handle = gr_ctx->virt_ctx;
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
@@ -329,7 +321,6 @@ static void vgpu_gr_free_channel_gr_ctx(struct channel_gk20a *c)
 static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 					struct channel_gk20a *c)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct patch_desc *patch_ctx = &c->ch_ctx.patch_ctx;
 	struct vm_gk20a *ch_vm = c->vm;
 	struct tegra_vgpu_cmd_msg msg;
@@ -346,7 +337,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 		return -ENOMEM;
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_PATCH_CTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = c->virt_ctx;
 	p->patch_ctx_va = patch_ctx->mem.gpu_va;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -361,7 +352,6 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 
 static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
 	struct patch_desc *patch_ctx = &c->ch_ctx.patch_ctx;
 	struct vm_gk20a *ch_vm = c->vm;
 
@@ -373,7 +363,7 @@ static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
 		int err;
 
 		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_GR_PATCH_CTX;
-		msg.handle = platform->virt_handle;
+		msg.handle = vgpu_get_handle(c->g);
 		p->handle = c->virt_ctx;
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
@@ -386,7 +376,6 @@ static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
 
 static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_free_hwpm_ctx *p = &msg.params.free_hwpm_ctx;
 	struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
@@ -399,7 +388,7 @@ static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
 		return;
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWPM_CTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(c->g);
 	p->handle = c->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -427,7 +416,6 @@ static void vgpu_gr_free_channel_ctx(struct channel_gk20a *c)
 
 static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
 	struct gr_ctx_desc *gr_ctx = c->ch_ctx.gr_ctx;
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_channel_bind_gr_ctx_params *p =
@@ -435,7 +423,7 @@ static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
 	int err;
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(c->g);
 	p->ch_handle = c->virt_ctx;
 	p->gr_ctx_handle = gr_ctx->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -447,7 +435,6 @@ static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
 
 static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(tsg->g->dev);
 	struct gr_ctx_desc *gr_ctx = tsg->tsg_gr_ctx;
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_tsg_bind_gr_ctx_params *p =
@@ -455,7 +442,7 @@ static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
 	int err;
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_GR_CTX;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(tsg->g);
 	p->tsg_id = tsg->tsgid;
 	p->gr_ctx_handle = gr_ctx->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -621,12 +608,11 @@ static int vgpu_gr_free_obj_ctx(struct channel_gk20a *c,
 
 static u32 vgpu_gr_get_gpc_tpc_count(struct gk20a *g, u32 gpc_index)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 data;
 
 	WARN_ON(gpc_index > 0);
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_GPC0_TPC_COUNT, &data))
 		gk20a_err(dev_from_gk20a(g), "failed to retrieve gpc0_tpc_count");
 	return data;
@@ -634,26 +620,25 @@ static u32 vgpu_gr_get_gpc_tpc_count(struct gk20a *g, u32 gpc_index)
 
 static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 gpc_index;
 
 	gk20a_dbg_fn("");
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_GPC_COUNT, &gr->gpc_count))
 		return -ENOMEM;
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_MAX_TPC_PER_GPC_COUNT,
 			&gr->max_tpc_per_gpc_count))
 		return -ENOMEM;
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_MAX_TPC_COUNT,
 			&gr->max_tpc_count))
 		return -ENOMEM;
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_TPC_COUNT,
 			&gr->tpc_count))
 		return -ENOMEM;
@@ -701,7 +686,6 @@ static int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
 			struct channel_gk20a *c, u64 zcull_va,
 			u32 mode)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_zcull_bind_params *p = &msg.params.zcull_bind;
 	int err;
@@ -709,7 +693,7 @@ static int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_ZCULL;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = c->virt_ctx;
 	p->zcull_va = zcull_va;
 	p->mode = mode;
@@ -721,7 +705,6 @@ static int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
 static int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
 			struct gr_zcull_info *zcull_params)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_zcull_info_params *p = &msg.params.zcull_info;
 	int err;
@@ -729,7 +712,7 @@ static int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_GET_ZCULL_INFO;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret)
 		return -ENOMEM;
@@ -752,12 +735,11 @@ static int vgpu_gr_get_zcull_info(struct gk20a *g, struct gr_gk20a *gr,
 
 static u32 vgpu_gr_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 data;
 
 	WARN_ON(gpc_index > 0);
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_GPC0_TPC_MASK, &data))
 		gk20a_err(dev_from_gk20a(g), "failed to retrieve gpc0_tpc_mask");
 
@@ -766,12 +748,11 @@ static u32 vgpu_gr_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
 
 static u32 vgpu_gr_get_max_fbps_count(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 max_fbps_count = 0;
 
 	gk20a_dbg_fn("");
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_NUM_FBPS, &max_fbps_count))
 		gk20a_err(dev_from_gk20a(g), "failed to retrieve num fbps");
 
@@ -780,12 +761,11 @@ static u32 vgpu_gr_get_max_fbps_count(struct gk20a *g)
 
 static u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 fbp_en_mask = 0;
 
 	gk20a_dbg_fn("");
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_FBP_EN_MASK, &fbp_en_mask))
 		gk20a_err(dev_from_gk20a(g), "failed to retrieve fbp en mask");
 
@@ -794,12 +774,11 @@ static u32 vgpu_gr_get_fbp_en_mask(struct gk20a *g)
 
 static u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 val = 0;
 
 	gk20a_dbg_fn("");
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_MAX_LTC_PER_FBP, &val))
 		gk20a_err(dev_from_gk20a(g), "failed to retrieve max ltc per fbp");
 
@@ -808,12 +787,11 @@ static u32 vgpu_gr_get_max_ltc_per_fbp(struct gk20a *g)
 
 static u32 vgpu_gr_get_max_lts_per_ltc(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 val = 0;
 
 	gk20a_dbg_fn("");
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_MAX_LTS_PER_LTC, &val))
 		gk20a_err(dev_from_gk20a(g), "failed to retrieve lts per ltc");
 
@@ -829,7 +807,6 @@ static u32 *vgpu_gr_rop_l2_en_mask(struct gk20a *g)
 static int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
 			struct zbc_entry *zbc_val)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_zbc_set_table_params *p = &msg.params.zbc_set_table;
 	int err;
@@ -837,7 +814,7 @@ static int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_ZBC_SET_TABLE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 
 	p->type = zbc_val->type;
 	p->format = zbc_val->format;
@@ -861,7 +838,6 @@ static int vgpu_gr_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
 static int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 			struct zbc_query_params *query_params)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_zbc_query_table_params *p =
 			&msg.params.zbc_query_table;
@@ -870,7 +846,7 @@ static int vgpu_gr_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_ZBC_QUERY_TABLE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 
 	p->type = query_params->type;
 	p->index_size = query_params->index_size;
@@ -1048,7 +1024,6 @@ int vgpu_gr_nonstall_isr(struct gk20a *g,
 static int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
 		struct channel_gk20a *ch, u64 sms, bool enable)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_sm_debug_mode *p = &msg.params.sm_debug_mode;
 	int err;
@@ -1056,7 +1031,7 @@ static int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_SET_SM_DEBUG_MODE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = ch->virt_ctx;
 	p->sms = sms;
 	p->enable = (u32)enable;
@@ -1069,7 +1044,6 @@ static int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
 static int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
 		struct channel_gk20a *ch, bool enable)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
 	int err;
@@ -1077,7 +1051,7 @@ static int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_SMPC_CTXSW_MODE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = ch->virt_ctx;
 
 	if (enable)
@@ -1094,7 +1068,6 @@ static int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
 static int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 		struct channel_gk20a *ch, bool enable)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
 	int err;
@@ -1102,7 +1075,7 @@ static int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = ch->virt_ctx;
 
 	/* If we just enabled HWPM context switching, flag this
diff --git a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c b/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c
index 1adb8b22..3e3f67c6 100644
--- a/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/ltc_vgpu.c
@@ -17,12 +17,11 @@
 
 static int vgpu_determine_L2_size_bytes(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 cache_size = 0;
 
 	gk20a_dbg_fn("");
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_L2_SIZE, &cache_size))
 		dev_err(dev_from_gk20a(g), "unable to get L2 size\n");
 
@@ -31,22 +30,21 @@ static int vgpu_determine_L2_size_bytes(struct gk20a *g)
 
 static int vgpu_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 max_comptag_lines = 0;
 	int err;
 
 	gk20a_dbg_fn("");
 
-	err = vgpu_get_attribute(platform->virt_handle,
+	err = vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_CACHELINE_SIZE,
 			&gr->cacheline_size);
-	err |= vgpu_get_attribute(platform->virt_handle,
+	err |= vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_COMPTAGS_PER_CACHELINE,
 			&gr->comptags_per_cacheline);
-	err |= vgpu_get_attribute(platform->virt_handle,
+	err |= vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_SLICES_PER_LTC,
 			&gr->slices_per_ltc);
-	err |= vgpu_get_attribute(platform->virt_handle,
+	err |= vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_COMPTAG_LINES, &max_comptag_lines);
 	if (err) {
 		dev_err(dev_from_gk20a(g), "failed to get ctags atributes\n");
@@ -65,13 +63,12 @@ static int vgpu_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 
 static void vgpu_ltc_init_fs_state(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 ltc_count = 0;
 	int err;
 
 	gk20a_dbg_fn("");
 
-	err = vgpu_get_attribute(platform->virt_handle,
+	err = vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_LTC_COUNT, &ltc_count);
 	WARN_ON(err);
 	g->ltc_count = ltc_count;
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 6b741cd4..b256598f 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -1,7 +1,7 @@
 /*
  * Virtualized GPU Memory Management
  *
- * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -86,7 +86,6 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 	int err = 0;
 	struct device *d = dev_from_vm(vm);
 	struct gk20a *g = gk20a_from_vm(vm);
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(d);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
@@ -114,7 +113,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 		prot = TEGRA_VGPU_MAP_PROT_NONE;
 
 	msg.cmd = TEGRA_VGPU_CMD_AS_MAP;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = vm->handle;
 	p->addr = addr;
 	p->gpu_va = map_offset;
@@ -164,7 +163,6 @@ static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 			struct vm_gk20a_mapping_batch *batch)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
 	int err;
@@ -181,7 +179,7 @@ static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 	}
 
 	msg.cmd = TEGRA_VGPU_CMD_AS_UNMAP;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = vm->handle;
 	p->gpu_va = vaddr;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -195,7 +193,6 @@ static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 {
 	struct gk20a *g = vm->mm->g;
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct mapped_buffer_node *mapped_buffer;
 	struct vm_reserved_va_node *va_node, *va_node_tmp;
 	struct tegra_vgpu_cmd_msg msg;
@@ -225,7 +222,7 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 	}
 
 	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = vm->handle;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -244,7 +241,6 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 
 u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct dma_iommu_mapping *mapping =
 			to_dma_iommu_mapping(dev_from_gk20a(g));
 	u64 addr = g->ops.mm.get_iova_addr(g, (*sgt)->sgl, 0);
@@ -253,7 +249,7 @@ u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size)
 	int err;
 
 	msg.cmd = TEGRA_VGPU_CMD_MAP_BAR1;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->addr = addr;
 	p->size = size;
 	p->iova = mapping ? 1 : 0;
@@ -320,7 +316,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	vm->va_limit = mm->channel.user_size + mm->channel.kernel_size;
 
 	msg.cmd = TEGRA_VGPU_CMD_AS_ALLOC_SHARE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->size = vm->va_limit;
 	p->big_page_size = vm->big_page_size;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -434,7 +430,7 @@ clean_up_small_allocator:
 	gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]);
 clean_up_share:
 	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = vm->handle;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -448,7 +444,6 @@ static int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
 			struct channel_gk20a *ch)
 {
 	struct vm_gk20a *vm = as_share->vm;
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share;
 	int err;
@@ -457,7 +452,7 @@ static int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
 
 	ch->vm = vm;
 	msg.cmd = TEGRA_VGPU_CMD_AS_BIND_SHARE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->as_handle = vm->handle;
 	p->chan_handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -485,26 +480,23 @@ static void vgpu_cache_maint(u64 handle, u8 op)
 
 static int vgpu_mm_fb_flush(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 
 	gk20a_dbg_fn("");
 
-	vgpu_cache_maint(platform->virt_handle, TEGRA_VGPU_FB_FLUSH);
+	vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH);
 	return 0;
 }
 
 static void vgpu_mm_l2_invalidate(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 
 	gk20a_dbg_fn("");
 
-	vgpu_cache_maint(platform->virt_handle, TEGRA_VGPU_L2_MAINT_INV);
+	vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV);
 }
 
 static void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u8 op;
 
 	gk20a_dbg_fn("");
@@ -514,13 +506,12 @@ static void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
 	else
 		op = TEGRA_VGPU_L2_MAINT_FLUSH;
 
-	vgpu_cache_maint(platform->virt_handle, op);
+	vgpu_cache_maint(vgpu_get_handle(g), op);
 }
 
 static void vgpu_mm_tlb_invalidate(struct vm_gk20a *vm)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_invalidate_params *p = &msg.params.as_invalidate;
 	int err;
@@ -528,7 +519,7 @@ static void vgpu_mm_tlb_invalidate(struct vm_gk20a *vm)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_AS_INVALIDATE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->handle = vm->handle;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
@@ -536,7 +527,6 @@ static void vgpu_mm_tlb_invalidate(struct vm_gk20a *vm)
 
 static void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode;
 	int err;
@@ -544,7 +534,7 @@ static void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 	p->enable = (u32)enable;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
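
Every conversion in mm_vgpu.c follows the same RPC shape: fill in a tegra_vgpu_cmd_msg, tag it with the per-GPU handle, and round-trip it with vgpu_comm_sendrecv(). A minimal sketch of that shape, modeled on the TLB-invalidate command shown above (the function name is hypothetical):

/* Sketch only: the command round-trip used throughout this file. */
static void example_tlb_invalidate(struct vm_gk20a *vm)
{
	struct gk20a *g = gk20a_from_vm(vm);
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_as_invalidate_params *p = &msg.params.as_invalidate;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_AS_INVALIDATE;
	msg.handle = vgpu_get_handle(g);	/* was platform->virt_handle */
	p->handle = vm->handle;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	WARN_ON(err || msg.ret);	/* void path: warn, cannot propagate */
}
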
diff --git a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
index 820a3db3..2033fd7a 100644
--- a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
@@ -26,7 +26,6 @@
static int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
 			struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(tsg->g->dev);
 	struct tegra_vgpu_cmd_msg msg = {};
 	struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
 			&msg.params.tsg_bind_unbind_channel;
@@ -39,7 +38,7 @@ static int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
 		return err;
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_CHANNEL;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(tsg->g);
 	p->tsg_id = tsg->tsgid;
 	p->ch_handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -56,7 +55,6 @@ static int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
 
 static int vgpu_tsg_unbind_channel(struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
 	struct tegra_vgpu_cmd_msg msg = {};
 	struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
 			&msg.params.tsg_bind_unbind_channel;
@@ -69,7 +67,7 @@ static int vgpu_tsg_unbind_channel(struct channel_gk20a *ch)
 		return err;
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_UNBIND_CHANNEL;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(ch->g);
 	p->ch_handle = ch->virt_ctx;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
@@ -80,7 +78,6 @@ static int vgpu_tsg_unbind_channel(struct channel_gk20a *ch)
 
 static int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(tsg->g->dev);
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_tsg_timeslice_params *p =
 			&msg.params.tsg_timeslice;
@@ -89,7 +86,7 @@ static int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_SET_TIMESLICE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(tsg->g);
 	p->tsg_id = tsg->tsgid;
 	p->timeslice_us = timeslice;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
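
Unlike the void cache-maintenance paths in mm_vgpu.c, which can only WARN_ON() a failure, these TSG paths return an error, and the convention visible above is that the transport error takes precedence over the server-side status in msg.ret. A sketch of that fold, with a hypothetical helper name:

/* Sketch only: combine transport and server status into one errno. */
static int example_cmd_result(struct tegra_vgpu_cmd_msg *msg, int err)
{
	return err ? err : msg->ret;	/* transport failure wins */
}
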
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
index c03086e1..300ffc98 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.c
@@ -187,7 +187,7 @@ static int vgpu_intr_thread(void *dev_id)
static void vgpu_remove_support(struct device *dev)
 {
 	struct gk20a *g = get_gk20a(dev);
-	struct gk20a_platform *platform = gk20a_get_platform(dev);
+	struct vgpu_priv_data *priv = vgpu_get_priv_data_from_dev(dev);
 	struct tegra_vgpu_intr_msg msg;
 	int err;
 
@@ -208,7 +208,7 @@ static void vgpu_remove_support(struct device *dev)
 			TEGRA_GR_COMM_ID_SELF, TEGRA_VGPU_QUEUE_INTR,
 			&msg, sizeof(msg));
 	WARN_ON(err);
-	kthread_stop(platform->intr_handler);
+	kthread_stop(priv->intr_handler);
 
 	/* free mappings to registers, etc*/
 
@@ -271,11 +271,10 @@ int vgpu_pm_prepare_poweroff(struct device *dev)
 static void vgpu_detect_chip(struct gk20a *g)
 {
 	struct nvgpu_gpu_characteristics *gpu = &g->gpu_characteristics;
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 
 	u32 mc_boot_0_value;
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_PMC_BOOT_0,
 			&mc_boot_0_value)) {
 		gk20a_err(dev_from_gk20a(g), "failed to detect chip");
@@ -297,7 +296,6 @@ static void vgpu_detect_chip(struct gk20a *g)
 
 static int vgpu_init_gpu_characteristics(struct gk20a *g)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	u32 max_freq;
 	int err;
 
@@ -307,7 +305,7 @@ static int vgpu_init_gpu_characteristics(struct gk20a *g)
 	if (err)
 		return err;
 
-	if (vgpu_get_attribute(platform->virt_handle,
+	if (vgpu_get_attribute(vgpu_get_handle(g),
 			TEGRA_VGPU_ATTRIB_MAX_FREQ, &max_freq))
 		return -ENOMEM;
 
@@ -318,7 +316,6 @@ static int vgpu_init_gpu_characteristics(struct gk20a *g)
 
 static int vgpu_read_ptimer(struct gk20a *g, u64 *value)
 {
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_read_ptimer_params *p = &msg.params.read_ptimer;
 	int err;
@@ -326,7 +323,7 @@ static int vgpu_read_ptimer(struct gk20a *g, u64 *value)
 	gk20a_dbg_fn("");
 
 	msg.cmd = TEGRA_VGPU_CMD_READ_PTIMER;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle(g);
 
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
@@ -441,7 +438,6 @@ static int vgpu_qos_notify(struct notifier_block *nb,
 	struct gk20a_scale_profile *profile =
 			container_of(nb, struct gk20a_scale_profile,
 					qos_notify_block);
-	struct gk20a_platform *platform = gk20a_get_platform(profile->dev);
 	struct tegra_vgpu_cmd_msg msg = {};
 	struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
 	u32 max_freq;
@@ -452,7 +448,7 @@ static int vgpu_qos_notify(struct notifier_block *nb,
 	max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS);
 
 	msg.cmd = TEGRA_VGPU_CMD_SET_GPU_CLK_RATE;
-	msg.handle = platform->virt_handle;
+	msg.handle = vgpu_get_handle_from_dev(profile->dev);
 	p->rate = max_freq;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
@@ -510,6 +506,7 @@ int vgpu_probe(struct platform_device *pdev)
 	int err;
 	struct device *dev = &pdev->dev;
 	struct gk20a_platform *platform = gk20a_get_platform(dev);
+	struct vgpu_priv_data *priv;
 
 	if (!platform) {
 		dev_err(dev, "no platform data\n");
@@ -518,6 +515,10 @@ int vgpu_probe(struct platform_device *pdev)
 
 	gk20a_dbg_fn("");
 
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
 	gk20a = kzalloc(sizeof(struct gk20a), GFP_KERNEL);
 	if (!gk20a) {
 		dev_err(dev, "couldn't allocate gk20a support");
@@ -525,6 +526,7 @@ int vgpu_probe(struct platform_device *pdev)
 	}
 
 	platform->g = gk20a;
+	platform->vgpu_priv = priv;
 	gk20a->dev = dev;
 
 	err = gk20a_user_init(dev, INTERFACE_NAME, &nvgpu_class);
@@ -564,15 +566,15 @@ int vgpu_probe(struct platform_device *pdev)
 		return -ENOSYS;
 	}
 
-	platform->virt_handle = vgpu_connect();
-	if (!platform->virt_handle) {
+	priv->virt_handle = vgpu_connect();
+	if (!priv->virt_handle) {
 		dev_err(dev, "failed to connect to server node\n");
 		vgpu_comm_deinit();
 		return -ENOSYS;
 	}
 
-	platform->intr_handler = kthread_run(vgpu_intr_thread, gk20a, "gk20a");
-	if (IS_ERR(platform->intr_handler))
+	priv->intr_handler = kthread_run(vgpu_intr_thread, gk20a, "gk20a");
+	if (IS_ERR(priv->intr_handler))
 		return -ENOMEM;
 
 	gk20a_debug_init(dev);
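
The probe-time wiring above is the core of the change: the private data is devm-allocated (so it is freed automatically with the device), published through platform->vgpu_priv before anything can query it, and only then populated with the connection handle and interrupt thread. A condensed sketch of that sequence, with the error paths abbreviated relative to the hunks (the real failure path also calls vgpu_comm_deinit()):

/* Sketch only: the probe-time sequence condensed from vgpu_probe(). */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);	/* freed with dev */
if (!priv)
	return -ENOMEM;
platform->vgpu_priv = priv;		/* reachable from the device */

priv->virt_handle = vgpu_connect();	/* tag carried by every RPC */
if (!priv->virt_handle)
	return -ENOSYS;

priv->intr_handler = kthread_run(vgpu_intr_thread, gk20a, "gk20a");
if (IS_ERR(priv->intr_handler))
	return -ENOMEM;
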
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.h b/drivers/gpu/nvgpu/vgpu/vgpu.h
index f79c8aab..e1fff966 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.h
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.h
@@ -21,6 +21,42 @@
21#include "gk20a/gk20a.h" 21#include "gk20a/gk20a.h"
22 22
23#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION 23#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
24
25struct vgpu_priv_data {
26 u64 virt_handle;
27 struct task_struct *intr_handler;
28};
29
30static inline
31struct vgpu_priv_data *vgpu_get_priv_data_from_dev(struct device *dev)
32{
33 struct gk20a_platform *plat = gk20a_get_platform(dev);
34
35 return (struct vgpu_priv_data *)plat->vgpu_priv;
36}
37
38static inline struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g)
39{
40 return vgpu_get_priv_data_from_dev(g->dev);
41}
42
43static inline u64 vgpu_get_handle_from_dev(struct device *dev)
44{
45 struct vgpu_priv_data *priv = vgpu_get_priv_data_from_dev(dev);
46
47 if (unlikely(!priv)) {
48 dev_err(dev, "invalid vgpu_priv_data in %s\n", __func__);
49 return INT_MAX;
50 }
51
52 return priv->virt_handle;
53}
54
55static inline u64 vgpu_get_handle(struct gk20a *g)
56{
57 return vgpu_get_handle_from_dev(g->dev);
58}
59
24int vgpu_pm_prepare_poweroff(struct device *dev); 60int vgpu_pm_prepare_poweroff(struct device *dev);
25int vgpu_pm_finalize_poweron(struct device *dev); 61int vgpu_pm_finalize_poweron(struct device *dev);
26int vgpu_probe(struct platform_device *dev); 62int vgpu_probe(struct platform_device *dev);
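
With these helpers, any layer that can reach either a struct gk20a or a bare struct device resolves the handle in one call, and a missing vgpu_priv_data is reported rather than dereferenced; note that the fallback is INT_MAX, which stands in as an invalid handle rather than zero, so callers cannot simply truth-test the result. An illustrative caller mirroring vgpu_remove_support() above (not part of the patch, with an extra defensive check):

/* Sketch only: a teardown path built on the new accessors. */
static void example_stop_intr_thread(struct device *dev)
{
	struct vgpu_priv_data *priv = vgpu_get_priv_data_from_dev(dev);

	if (priv && !IS_ERR_OR_NULL(priv->intr_handler))
		kthread_stop(priv->intr_handler);
}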