diff options
author | Richard Zhao <rizhao@nvidia.com> | 2016-07-21 19:51:40 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2016-08-15 14:41:16 -0400 |
commit | e1438818b90c5b0d73aae800b12bd6b36aec5142 (patch) | |
tree | f0582cda23552526c3067e90f4cb74b461d50d73 /drivers/gpu/nvgpu/vgpu/fifo_vgpu.c | |
parent | 33ff34887f560449828e79170a2a36a97496eeec (diff) |
gpu: nvgpu: vgpu: add vgpu private data and helper functions
Move vgpu private data to a dedicated structure and allocate it
at probe time. Also add a virt_handle helper function, which is used
everywhere.
JIRA VFND-2103
Change-Id: I125911420be72ca9be948125d8357fa85d1d3afd
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: http://git-master/r/1185206
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vladislav Buzov <vbuzov@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/fifo_vgpu.c')
-rw-r--r-- | drivers/gpu/nvgpu/vgpu/fifo_vgpu.c | 48 |
1 file changed, 16 insertions, 32 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c index 8d3a5e9f..baab42c8 100644 --- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c | |||
@@ -23,7 +23,6 @@ | |||
23 | 23 | ||
24 | static void vgpu_channel_bind(struct channel_gk20a *ch) | 24 | static void vgpu_channel_bind(struct channel_gk20a *ch) |
25 | { | 25 | { |
26 | struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev); | ||
27 | struct tegra_vgpu_cmd_msg msg; | 26 | struct tegra_vgpu_cmd_msg msg; |
28 | struct tegra_vgpu_channel_config_params *p = | 27 | struct tegra_vgpu_channel_config_params *p = |
29 | &msg.params.channel_config; | 28 | &msg.params.channel_config; |
@@ -32,7 +31,7 @@ static void vgpu_channel_bind(struct channel_gk20a *ch) | |||
32 | gk20a_dbg_info("bind channel %d", ch->hw_chid); | 31 | gk20a_dbg_info("bind channel %d", ch->hw_chid); |
33 | 32 | ||
34 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND; | 33 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND; |
35 | msg.handle = platform->virt_handle; | 34 | msg.handle = vgpu_get_handle(ch->g); |
36 | p->handle = ch->virt_ctx; | 35 | p->handle = ch->virt_ctx; |
37 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 36 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
38 | WARN_ON(err || msg.ret); | 37 | WARN_ON(err || msg.ret); |
@@ -42,7 +41,6 @@ static void vgpu_channel_bind(struct channel_gk20a *ch) | |||
42 | 41 | ||
43 | static void vgpu_channel_unbind(struct channel_gk20a *ch) | 42 | static void vgpu_channel_unbind(struct channel_gk20a *ch) |
44 | { | 43 | { |
45 | struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev); | ||
46 | 44 | ||
47 | gk20a_dbg_fn(""); | 45 | gk20a_dbg_fn(""); |
48 | 46 | ||
@@ -53,7 +51,7 @@ static void vgpu_channel_unbind(struct channel_gk20a *ch) | |||
53 | int err; | 51 | int err; |
54 | 52 | ||
55 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_UNBIND; | 53 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_UNBIND; |
56 | msg.handle = platform->virt_handle; | 54 | msg.handle = vgpu_get_handle(ch->g); |
57 | p->handle = ch->virt_ctx; | 55 | p->handle = ch->virt_ctx; |
58 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 56 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
59 | WARN_ON(err || msg.ret); | 57 | WARN_ON(err || msg.ret); |
@@ -64,7 +62,6 @@ static void vgpu_channel_unbind(struct channel_gk20a *ch) | |||
64 | 62 | ||
65 | static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) | 63 | static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) |
66 | { | 64 | { |
67 | struct gk20a_platform *platform = gk20a_get_platform(g->dev); | ||
68 | struct tegra_vgpu_cmd_msg msg; | 65 | struct tegra_vgpu_cmd_msg msg; |
69 | struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx; | 66 | struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx; |
70 | int err; | 67 | int err; |
@@ -72,7 +69,7 @@ static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) | |||
72 | gk20a_dbg_fn(""); | 69 | gk20a_dbg_fn(""); |
73 | 70 | ||
74 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX; | 71 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX; |
75 | msg.handle = platform->virt_handle; | 72 | msg.handle = vgpu_get_handle(g); |
76 | p->id = ch->hw_chid; | 73 | p->id = ch->hw_chid; |
77 | p->pid = (u64)current->tgid; | 74 | p->pid = (u64)current->tgid; |
78 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 75 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
@@ -88,7 +85,6 @@ static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) | |||
88 | 85 | ||
89 | static void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch) | 86 | static void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch) |
90 | { | 87 | { |
91 | struct gk20a_platform *platform = gk20a_get_platform(g->dev); | ||
92 | struct tegra_vgpu_cmd_msg msg; | 88 | struct tegra_vgpu_cmd_msg msg; |
93 | struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx; | 89 | struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx; |
94 | int err; | 90 | int err; |
@@ -96,7 +92,7 @@ static void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch) | |||
96 | gk20a_dbg_fn(""); | 92 | gk20a_dbg_fn(""); |
97 | 93 | ||
98 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWCTX; | 94 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FREE_HWCTX; |
99 | msg.handle = platform->virt_handle; | 95 | msg.handle = vgpu_get_handle(g); |
100 | p->handle = ch->virt_ctx; | 96 | p->handle = ch->virt_ctx; |
101 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 97 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
102 | WARN_ON(err || msg.ret); | 98 | WARN_ON(err || msg.ret); |
@@ -104,7 +100,6 @@ static void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch) | |||
104 | 100 | ||
105 | static void vgpu_channel_enable(struct channel_gk20a *ch) | 101 | static void vgpu_channel_enable(struct channel_gk20a *ch) |
106 | { | 102 | { |
107 | struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev); | ||
108 | struct tegra_vgpu_cmd_msg msg; | 103 | struct tegra_vgpu_cmd_msg msg; |
109 | struct tegra_vgpu_channel_config_params *p = | 104 | struct tegra_vgpu_channel_config_params *p = |
110 | &msg.params.channel_config; | 105 | &msg.params.channel_config; |
@@ -113,7 +108,7 @@ static void vgpu_channel_enable(struct channel_gk20a *ch) | |||
113 | gk20a_dbg_fn(""); | 108 | gk20a_dbg_fn(""); |
114 | 109 | ||
115 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE; | 110 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ENABLE; |
116 | msg.handle = platform->virt_handle; | 111 | msg.handle = vgpu_get_handle(ch->g); |
117 | p->handle = ch->virt_ctx; | 112 | p->handle = ch->virt_ctx; |
118 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 113 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
119 | WARN_ON(err || msg.ret); | 114 | WARN_ON(err || msg.ret); |
@@ -121,7 +116,6 @@ static void vgpu_channel_enable(struct channel_gk20a *ch) | |||
121 | 116 | ||
122 | static void vgpu_channel_disable(struct channel_gk20a *ch) | 117 | static void vgpu_channel_disable(struct channel_gk20a *ch) |
123 | { | 118 | { |
124 | struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev); | ||
125 | struct tegra_vgpu_cmd_msg msg; | 119 | struct tegra_vgpu_cmd_msg msg; |
126 | struct tegra_vgpu_channel_config_params *p = | 120 | struct tegra_vgpu_channel_config_params *p = |
127 | &msg.params.channel_config; | 121 | &msg.params.channel_config; |
@@ -130,7 +124,7 @@ static void vgpu_channel_disable(struct channel_gk20a *ch) | |||
130 | gk20a_dbg_fn(""); | 124 | gk20a_dbg_fn(""); |
131 | 125 | ||
132 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE; | 126 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_DISABLE; |
133 | msg.handle = platform->virt_handle; | 127 | msg.handle = vgpu_get_handle(ch->g); |
134 | p->handle = ch->virt_ctx; | 128 | p->handle = ch->virt_ctx; |
135 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 129 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
136 | WARN_ON(err || msg.ret); | 130 | WARN_ON(err || msg.ret); |
@@ -139,7 +133,6 @@ static void vgpu_channel_disable(struct channel_gk20a *ch) | |||
139 | static int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base, | 133 | static int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base, |
140 | u32 gpfifo_entries, u32 flags) | 134 | u32 gpfifo_entries, u32 flags) |
141 | { | 135 | { |
142 | struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev); | ||
143 | struct device __maybe_unused *d = dev_from_gk20a(ch->g); | 136 | struct device __maybe_unused *d = dev_from_gk20a(ch->g); |
144 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(d); | 137 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(d); |
145 | struct tegra_vgpu_cmd_msg msg; | 138 | struct tegra_vgpu_cmd_msg msg; |
@@ -149,7 +142,7 @@ static int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base, | |||
149 | gk20a_dbg_fn(""); | 142 | gk20a_dbg_fn(""); |
150 | 143 | ||
151 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SETUP_RAMFC; | 144 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SETUP_RAMFC; |
152 | msg.handle = platform->virt_handle; | 145 | msg.handle = vgpu_get_handle(ch->g); |
153 | p->handle = ch->virt_ctx; | 146 | p->handle = ch->virt_ctx; |
154 | p->gpfifo_va = gpfifo_base; | 147 | p->gpfifo_va = gpfifo_base; |
155 | p->num_entries = gpfifo_entries; | 148 | p->num_entries = gpfifo_entries; |
@@ -242,7 +235,6 @@ clean_up_runlist: | |||
242 | 235 | ||
243 | static int vgpu_init_fifo_setup_sw(struct gk20a *g) | 236 | static int vgpu_init_fifo_setup_sw(struct gk20a *g) |
244 | { | 237 | { |
245 | struct gk20a_platform *platform = gk20a_get_platform(g->dev); | ||
246 | struct fifo_gk20a *f = &g->fifo; | 238 | struct fifo_gk20a *f = &g->fifo; |
247 | struct device *d = dev_from_gk20a(g); | 239 | struct device *d = dev_from_gk20a(g); |
248 | int chid, err = 0; | 240 | int chid, err = 0; |
@@ -256,7 +248,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g) | |||
256 | 248 | ||
257 | f->g = g; | 249 | f->g = g; |
258 | 250 | ||
259 | err = vgpu_get_attribute(platform->virt_handle, | 251 | err = vgpu_get_attribute(vgpu_get_handle(g), |
260 | TEGRA_VGPU_ATTRIB_NUM_CHANNELS, | 252 | TEGRA_VGPU_ATTRIB_NUM_CHANNELS, |
261 | &f->num_channels); | 253 | &f->num_channels); |
262 | if (err) | 254 | if (err) |
@@ -411,7 +403,6 @@ int vgpu_init_fifo_support(struct gk20a *g) | |||
411 | 403 | ||
412 | static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid) | 404 | static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid) |
413 | { | 405 | { |
414 | struct gk20a_platform *platform = gk20a_get_platform(g->dev); | ||
415 | struct fifo_gk20a *f = &g->fifo; | 406 | struct fifo_gk20a *f = &g->fifo; |
416 | struct tegra_vgpu_cmd_msg msg; | 407 | struct tegra_vgpu_cmd_msg msg; |
417 | struct tegra_vgpu_channel_config_params *p = | 408 | struct tegra_vgpu_channel_config_params *p = |
@@ -421,7 +412,7 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid) | |||
421 | gk20a_dbg_fn(""); | 412 | gk20a_dbg_fn(""); |
422 | 413 | ||
423 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_PREEMPT; | 414 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_PREEMPT; |
424 | msg.handle = platform->virt_handle; | 415 | msg.handle = vgpu_get_handle(g); |
425 | p->handle = f->channel[hw_chid].virt_ctx; | 416 | p->handle = f->channel[hw_chid].virt_ctx; |
426 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 417 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
427 | 418 | ||
@@ -436,7 +427,6 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid) | |||
436 | 427 | ||
437 | static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) | 428 | static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) |
438 | { | 429 | { |
439 | struct gk20a_platform *platform = gk20a_get_platform(g->dev); | ||
440 | struct tegra_vgpu_cmd_msg msg; | 430 | struct tegra_vgpu_cmd_msg msg; |
441 | struct tegra_vgpu_tsg_preempt_params *p = | 431 | struct tegra_vgpu_tsg_preempt_params *p = |
442 | &msg.params.tsg_preempt; | 432 | &msg.params.tsg_preempt; |
@@ -445,7 +435,7 @@ static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) | |||
445 | gk20a_dbg_fn(""); | 435 | gk20a_dbg_fn(""); |
446 | 436 | ||
447 | msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT; | 437 | msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT; |
448 | msg.handle = platform->virt_handle; | 438 | msg.handle = vgpu_get_handle(g); |
449 | p->tsg_id = tsgid; | 439 | p->tsg_id = tsgid; |
450 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 440 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
451 | err = err ? err : msg.ret; | 441 | err = err ? err : msg.ret; |
@@ -490,7 +480,6 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, | |||
490 | u32 hw_chid, bool add, | 480 | u32 hw_chid, bool add, |
491 | bool wait_for_finish) | 481 | bool wait_for_finish) |
492 | { | 482 | { |
493 | struct gk20a_platform *platform = gk20a_get_platform(g->dev); | ||
494 | struct fifo_gk20a *f = &g->fifo; | 483 | struct fifo_gk20a *f = &g->fifo; |
495 | struct fifo_runlist_info_gk20a *runlist; | 484 | struct fifo_runlist_info_gk20a *runlist; |
496 | u16 *runlist_entry = NULL; | 485 | u16 *runlist_entry = NULL; |
@@ -529,7 +518,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, | |||
529 | } else /* suspend to remove all channels */ | 518 | } else /* suspend to remove all channels */ |
530 | count = 0; | 519 | count = 0; |
531 | 520 | ||
532 | return vgpu_submit_runlist(platform->virt_handle, runlist_id, | 521 | return vgpu_submit_runlist(vgpu_get_handle(g), runlist_id, |
533 | runlist->mem[0].cpu_va, count); | 522 | runlist->mem[0].cpu_va, count); |
534 | } | 523 | } |
535 | 524 | ||
@@ -566,7 +555,6 @@ static int vgpu_fifo_wait_engine_idle(struct gk20a *g) | |||
566 | 555 | ||
567 | static int vgpu_channel_set_priority(struct channel_gk20a *ch, u32 priority) | 556 | static int vgpu_channel_set_priority(struct channel_gk20a *ch, u32 priority) |
568 | { | 557 | { |
569 | struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev); | ||
570 | struct tegra_vgpu_cmd_msg msg; | 558 | struct tegra_vgpu_cmd_msg msg; |
571 | struct tegra_vgpu_channel_priority_params *p = | 559 | struct tegra_vgpu_channel_priority_params *p = |
572 | &msg.params.channel_priority; | 560 | &msg.params.channel_priority; |
@@ -575,7 +563,7 @@ static int vgpu_channel_set_priority(struct channel_gk20a *ch, u32 priority) | |||
575 | gk20a_dbg_info("channel %d set priority %u", ch->hw_chid, priority); | 563 | gk20a_dbg_info("channel %d set priority %u", ch->hw_chid, priority); |
576 | 564 | ||
577 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_PRIORITY; | 565 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_PRIORITY; |
578 | msg.handle = platform->virt_handle; | 566 | msg.handle = vgpu_get_handle(ch->g); |
579 | p->handle = ch->virt_ctx; | 567 | p->handle = ch->virt_ctx; |
580 | p->priority = priority; | 568 | p->priority = priority; |
581 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 569 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
@@ -589,7 +577,6 @@ static int vgpu_fifo_tsg_set_runlist_interleave(struct gk20a *g, | |||
589 | u32 runlist_id, | 577 | u32 runlist_id, |
590 | u32 new_level) | 578 | u32 new_level) |
591 | { | 579 | { |
592 | struct gk20a_platform *platform = gk20a_get_platform(g->dev); | ||
593 | struct tegra_vgpu_cmd_msg msg = {0}; | 580 | struct tegra_vgpu_cmd_msg msg = {0}; |
594 | struct tegra_vgpu_tsg_runlist_interleave_params *p = | 581 | struct tegra_vgpu_tsg_runlist_interleave_params *p = |
595 | &msg.params.tsg_interleave; | 582 | &msg.params.tsg_interleave; |
@@ -598,7 +585,7 @@ static int vgpu_fifo_tsg_set_runlist_interleave(struct gk20a *g, | |||
598 | gk20a_dbg_fn(""); | 585 | gk20a_dbg_fn(""); |
599 | 586 | ||
600 | msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE; | 587 | msg.cmd = TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE; |
601 | msg.handle = platform->virt_handle; | 588 | msg.handle = vgpu_get_handle(g); |
602 | p->tsg_id = tsgid; | 589 | p->tsg_id = tsgid; |
603 | p->level = new_level; | 590 | p->level = new_level; |
604 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 591 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
@@ -612,7 +599,6 @@ static int vgpu_fifo_set_runlist_interleave(struct gk20a *g, | |||
612 | u32 runlist_id, | 599 | u32 runlist_id, |
613 | u32 new_level) | 600 | u32 new_level) |
614 | { | 601 | { |
615 | struct gk20a_platform *platform = gk20a_get_platform(g->dev); | ||
616 | struct tegra_vgpu_cmd_msg msg; | 602 | struct tegra_vgpu_cmd_msg msg; |
617 | struct tegra_vgpu_channel_runlist_interleave_params *p = | 603 | struct tegra_vgpu_channel_runlist_interleave_params *p = |
618 | &msg.params.channel_interleave; | 604 | &msg.params.channel_interleave; |
@@ -627,7 +613,7 @@ static int vgpu_fifo_set_runlist_interleave(struct gk20a *g, | |||
627 | 613 | ||
628 | ch = &g->fifo.channel[id]; | 614 | ch = &g->fifo.channel[id]; |
629 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_RUNLIST_INTERLEAVE; | 615 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_RUNLIST_INTERLEAVE; |
630 | msg.handle = platform->virt_handle; | 616 | msg.handle = vgpu_get_handle(ch->g); |
631 | p->handle = ch->virt_ctx; | 617 | p->handle = ch->virt_ctx; |
632 | p->level = new_level; | 618 | p->level = new_level; |
633 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 619 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
@@ -637,7 +623,6 @@ static int vgpu_fifo_set_runlist_interleave(struct gk20a *g, | |||
637 | 623 | ||
638 | static int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice) | 624 | static int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice) |
639 | { | 625 | { |
640 | struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev); | ||
641 | struct tegra_vgpu_cmd_msg msg; | 626 | struct tegra_vgpu_cmd_msg msg; |
642 | struct tegra_vgpu_channel_timeslice_params *p = | 627 | struct tegra_vgpu_channel_timeslice_params *p = |
643 | &msg.params.channel_timeslice; | 628 | &msg.params.channel_timeslice; |
@@ -646,7 +631,7 @@ static int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice) | |||
646 | gk20a_dbg_fn(""); | 631 | gk20a_dbg_fn(""); |
647 | 632 | ||
648 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_TIMESLICE; | 633 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_TIMESLICE; |
649 | msg.handle = platform->virt_handle; | 634 | msg.handle = vgpu_get_handle(ch->g); |
650 | p->handle = ch->virt_ctx; | 635 | p->handle = ch->virt_ctx; |
651 | p->timeslice_us = timeslice; | 636 | p->timeslice_us = timeslice; |
652 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 637 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
@@ -659,7 +644,6 @@ static int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch, bool verbose) | |||
659 | struct tsg_gk20a *tsg = NULL; | 644 | struct tsg_gk20a *tsg = NULL; |
660 | struct channel_gk20a *ch_tsg = NULL; | 645 | struct channel_gk20a *ch_tsg = NULL; |
661 | struct gk20a *g = ch->g; | 646 | struct gk20a *g = ch->g; |
662 | struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev); | ||
663 | struct tegra_vgpu_cmd_msg msg = {0}; | 647 | struct tegra_vgpu_cmd_msg msg = {0}; |
664 | struct tegra_vgpu_channel_config_params *p = | 648 | struct tegra_vgpu_channel_config_params *p = |
665 | &msg.params.channel_config; | 649 | &msg.params.channel_config; |
@@ -687,7 +671,7 @@ static int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch, bool verbose) | |||
687 | } | 671 | } |
688 | 672 | ||
689 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FORCE_RESET; | 673 | msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FORCE_RESET; |
690 | msg.handle = platform->virt_handle; | 674 | msg.handle = vgpu_get_handle(ch->g); |
691 | p->handle = ch->virt_ctx; | 675 | p->handle = ch->virt_ctx; |
692 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 676 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
693 | WARN_ON(err || msg.ret); | 677 | WARN_ON(err || msg.ret); |