diff options
author | Peter Daifuku <pdaifuku@nvidia.com> | 2017-10-06 19:27:14 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-10-13 18:20:18 -0400 |
commit | 57fb527a7e33384341fc18f1f918d5a8225057f5 (patch) | |
tree | 23bb49f879ac495834237c99564f0589d637f07e /drivers/gpu/nvgpu/vgpu/fifo_vgpu.c | |
parent | 3d343c9eeaa3415851d1c71b8815eb7dc2677b5a (diff) |
gpu: nvgpu: vgpu: flatten out vgpu hal
Instead of calling the native HAL init function then adding
multiple layers of modification for VGPU, flatten out the sequence
so that all entry points are set statically and visible in a
single file.
JIRA ESRM-30
Change-Id: Ie424abb48bce5038874851d399baac5e4bb7d27c
Signed-off-by: Peter Daifuku <pdaifuku@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1574616
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/fifo_vgpu.c')
-rw-r--r-- | drivers/gpu/nvgpu/vgpu/fifo_vgpu.c | 61 |
1 file changed, 18 insertions, 43 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c index 73a67d91..582894b9 100644 --- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c | |||
@@ -32,12 +32,13 @@ | |||
32 | #include <nvgpu/barrier.h> | 32 | #include <nvgpu/barrier.h> |
33 | 33 | ||
34 | #include "vgpu/vgpu.h" | 34 | #include "vgpu/vgpu.h" |
35 | #include "vgpu/fifo_vgpu.h" | ||
35 | #include "gk20a/ctxsw_trace_gk20a.h" | 36 | #include "gk20a/ctxsw_trace_gk20a.h" |
36 | 37 | ||
37 | #include <nvgpu/hw/gk20a/hw_fifo_gk20a.h> | 38 | #include <nvgpu/hw/gk20a/hw_fifo_gk20a.h> |
38 | #include <nvgpu/hw/gk20a/hw_ram_gk20a.h> | 39 | #include <nvgpu/hw/gk20a/hw_ram_gk20a.h> |
39 | 40 | ||
40 | static void vgpu_channel_bind(struct channel_gk20a *ch) | 41 | void vgpu_channel_bind(struct channel_gk20a *ch) |
41 | { | 42 | { |
42 | struct tegra_vgpu_cmd_msg msg; | 43 | struct tegra_vgpu_cmd_msg msg; |
43 | struct tegra_vgpu_channel_config_params *p = | 44 | struct tegra_vgpu_channel_config_params *p = |
@@ -56,7 +57,7 @@ static void vgpu_channel_bind(struct channel_gk20a *ch) | |||
56 | nvgpu_atomic_set(&ch->bound, true); | 57 | nvgpu_atomic_set(&ch->bound, true); |
57 | } | 58 | } |
58 | 59 | ||
59 | static void vgpu_channel_unbind(struct channel_gk20a *ch) | 60 | void vgpu_channel_unbind(struct channel_gk20a *ch) |
60 | { | 61 | { |
61 | 62 | ||
62 | gk20a_dbg_fn(""); | 63 | gk20a_dbg_fn(""); |
@@ -76,7 +77,7 @@ static void vgpu_channel_unbind(struct channel_gk20a *ch) | |||
76 | 77 | ||
77 | } | 78 | } |
78 | 79 | ||
79 | static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) | 80 | int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) |
80 | { | 81 | { |
81 | struct tegra_vgpu_cmd_msg msg; | 82 | struct tegra_vgpu_cmd_msg msg; |
82 | struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx; | 83 | struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx; |
@@ -99,7 +100,7 @@ static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) | |||
99 | return 0; | 100 | return 0; |
100 | } | 101 | } |
101 | 102 | ||
102 | static void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch) | 103 | void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch) |
103 | { | 104 | { |
104 | struct tegra_vgpu_cmd_msg msg; | 105 | struct tegra_vgpu_cmd_msg msg; |
105 | struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx; | 106 | struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx; |
@@ -114,7 +115,7 @@ static void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch) | |||
114 | WARN_ON(err || msg.ret); | 115 | WARN_ON(err || msg.ret); |
115 | } | 116 | } |
116 | 117 | ||
117 | static void vgpu_channel_enable(struct channel_gk20a *ch) | 118 | void vgpu_channel_enable(struct channel_gk20a *ch) |
118 | { | 119 | { |
119 | struct tegra_vgpu_cmd_msg msg; | 120 | struct tegra_vgpu_cmd_msg msg; |
120 | struct tegra_vgpu_channel_config_params *p = | 121 | struct tegra_vgpu_channel_config_params *p = |
@@ -130,7 +131,7 @@ static void vgpu_channel_enable(struct channel_gk20a *ch) | |||
130 | WARN_ON(err || msg.ret); | 131 | WARN_ON(err || msg.ret); |
131 | } | 132 | } |
132 | 133 | ||
133 | static void vgpu_channel_disable(struct channel_gk20a *ch) | 134 | void vgpu_channel_disable(struct channel_gk20a *ch) |
134 | { | 135 | { |
135 | struct tegra_vgpu_cmd_msg msg; | 136 | struct tegra_vgpu_cmd_msg msg; |
136 | struct tegra_vgpu_channel_config_params *p = | 137 | struct tegra_vgpu_channel_config_params *p = |
@@ -146,7 +147,7 @@ static void vgpu_channel_disable(struct channel_gk20a *ch) | |||
146 | WARN_ON(err || msg.ret); | 147 | WARN_ON(err || msg.ret); |
147 | } | 148 | } |
148 | 149 | ||
149 | static int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base, | 150 | int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base, |
150 | u32 gpfifo_entries, | 151 | u32 gpfifo_entries, |
151 | unsigned long acquire_timeout, u32 flags) | 152 | unsigned long acquire_timeout, u32 flags) |
152 | { | 153 | { |
@@ -170,7 +171,7 @@ static int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base, | |||
170 | return (err || msg.ret) ? -ENOMEM : 0; | 171 | return (err || msg.ret) ? -ENOMEM : 0; |
171 | } | 172 | } |
172 | 173 | ||
173 | static int vgpu_fifo_init_engine_info(struct fifo_gk20a *f) | 174 | int vgpu_fifo_init_engine_info(struct fifo_gk20a *f) |
174 | { | 175 | { |
175 | struct vgpu_priv_data *priv = vgpu_get_priv_data(f->g); | 176 | struct vgpu_priv_data *priv = vgpu_get_priv_data(f->g); |
176 | struct tegra_vgpu_engines_info *engines = &priv->constants.engines_info; | 177 | struct tegra_vgpu_engines_info *engines = &priv->constants.engines_info; |
@@ -377,7 +378,7 @@ clean_up: | |||
377 | return err; | 378 | return err; |
378 | } | 379 | } |
379 | 380 | ||
380 | static int vgpu_init_fifo_setup_hw(struct gk20a *g) | 381 | int vgpu_init_fifo_setup_hw(struct gk20a *g) |
381 | { | 382 | { |
382 | gk20a_dbg_fn(""); | 383 | gk20a_dbg_fn(""); |
383 | 384 | ||
@@ -440,7 +441,7 @@ int vgpu_init_fifo_support(struct gk20a *g) | |||
440 | return err; | 441 | return err; |
441 | } | 442 | } |
442 | 443 | ||
443 | static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid) | 444 | int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid) |
444 | { | 445 | { |
445 | struct fifo_gk20a *f = &g->fifo; | 446 | struct fifo_gk20a *f = &g->fifo; |
446 | struct channel_gk20a *ch = &f->channel[chid]; | 447 | struct channel_gk20a *ch = &f->channel[chid]; |
@@ -468,7 +469,7 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid) | |||
468 | return err; | 469 | return err; |
469 | } | 470 | } |
470 | 471 | ||
471 | static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) | 472 | int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) |
472 | { | 473 | { |
473 | struct tegra_vgpu_cmd_msg msg; | 474 | struct tegra_vgpu_cmd_msg msg; |
474 | struct tegra_vgpu_tsg_preempt_params *p = | 475 | struct tegra_vgpu_tsg_preempt_params *p = |
@@ -579,7 +580,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, | |||
579 | special cases below: runlist->active_channels will NOT be changed. | 580 | special cases below: runlist->active_channels will NOT be changed. |
580 | (chid == ~0 && !add) means remove all active channels from runlist. | 581 | (chid == ~0 && !add) means remove all active channels from runlist. |
581 | (chid == ~0 && add) means restore all active channels on runlist. */ | 582 | (chid == ~0 && add) means restore all active channels on runlist. */ |
582 | static int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id, | 583 | int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id, |
583 | u32 chid, bool add, bool wait_for_finish) | 584 | u32 chid, bool add, bool wait_for_finish) |
584 | { | 585 | { |
585 | struct fifo_runlist_info_gk20a *runlist = NULL; | 586 | struct fifo_runlist_info_gk20a *runlist = NULL; |
@@ -599,14 +600,14 @@ static int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id, | |||
599 | return ret; | 600 | return ret; |
600 | } | 601 | } |
601 | 602 | ||
602 | static int vgpu_fifo_wait_engine_idle(struct gk20a *g) | 603 | int vgpu_fifo_wait_engine_idle(struct gk20a *g) |
603 | { | 604 | { |
604 | gk20a_dbg_fn(""); | 605 | gk20a_dbg_fn(""); |
605 | 606 | ||
606 | return 0; | 607 | return 0; |
607 | } | 608 | } |
608 | 609 | ||
609 | static int vgpu_channel_set_priority(struct channel_gk20a *ch, u32 priority) | 610 | int vgpu_channel_set_priority(struct channel_gk20a *ch, u32 priority) |
610 | { | 611 | { |
611 | struct tegra_vgpu_cmd_msg msg; | 612 | struct tegra_vgpu_cmd_msg msg; |
612 | struct tegra_vgpu_channel_priority_params *p = | 613 | struct tegra_vgpu_channel_priority_params *p = |
@@ -646,7 +647,7 @@ static int vgpu_fifo_tsg_set_runlist_interleave(struct gk20a *g, | |||
646 | return err ? err : msg.ret; | 647 | return err ? err : msg.ret; |
647 | } | 648 | } |
648 | 649 | ||
649 | static int vgpu_fifo_set_runlist_interleave(struct gk20a *g, | 650 | int vgpu_fifo_set_runlist_interleave(struct gk20a *g, |
650 | u32 id, | 651 | u32 id, |
651 | bool is_tsg, | 652 | bool is_tsg, |
652 | u32 runlist_id, | 653 | u32 runlist_id, |
@@ -674,7 +675,7 @@ static int vgpu_fifo_set_runlist_interleave(struct gk20a *g, | |||
674 | return err ? err : msg.ret; | 675 | return err ? err : msg.ret; |
675 | } | 676 | } |
676 | 677 | ||
677 | static int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice) | 678 | int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice) |
678 | { | 679 | { |
679 | struct tegra_vgpu_cmd_msg msg; | 680 | struct tegra_vgpu_cmd_msg msg; |
680 | struct tegra_vgpu_channel_timeslice_params *p = | 681 | struct tegra_vgpu_channel_timeslice_params *p = |
@@ -695,7 +696,7 @@ static int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice) | |||
695 | return err; | 696 | return err; |
696 | } | 697 | } |
697 | 698 | ||
698 | static int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch, | 699 | int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch, |
699 | u32 err_code, bool verbose) | 700 | u32 err_code, bool verbose) |
700 | { | 701 | { |
701 | struct tsg_gk20a *tsg = NULL; | 702 | struct tsg_gk20a *tsg = NULL; |
@@ -818,29 +819,3 @@ u32 vgpu_fifo_default_timeslice_us(struct gk20a *g) | |||
818 | 819 | ||
819 | return priv->constants.default_timeslice_us; | 820 | return priv->constants.default_timeslice_us; |
820 | } | 821 | } |
821 | |||
822 | void vgpu_init_fifo_ops(struct gpu_ops *gops) | ||
823 | { | ||
824 | gops->fifo.init_fifo_setup_hw = vgpu_init_fifo_setup_hw; | ||
825 | gops->fifo.bind_channel = vgpu_channel_bind; | ||
826 | gops->fifo.unbind_channel = vgpu_channel_unbind; | ||
827 | gops->fifo.enable_channel = vgpu_channel_enable; | ||
828 | gops->fifo.disable_channel = vgpu_channel_disable; | ||
829 | gops->fifo.alloc_inst = vgpu_channel_alloc_inst; | ||
830 | gops->fifo.free_inst = vgpu_channel_free_inst; | ||
831 | gops->fifo.setup_ramfc = vgpu_channel_setup_ramfc; | ||
832 | gops->fifo.preempt_channel = vgpu_fifo_preempt_channel; | ||
833 | gops->fifo.preempt_tsg = vgpu_fifo_preempt_tsg; | ||
834 | gops->fifo.enable_tsg = gk20a_enable_tsg; | ||
835 | gops->fifo.disable_tsg = gk20a_disable_tsg; | ||
836 | /* Not supported yet for vgpu */ | ||
837 | gops->fifo.tsg_verify_channel_status = NULL; | ||
838 | gops->fifo.update_runlist = vgpu_fifo_update_runlist; | ||
839 | gops->fifo.wait_engine_idle = vgpu_fifo_wait_engine_idle; | ||
840 | gops->fifo.channel_set_priority = vgpu_channel_set_priority; | ||
841 | gops->fifo.set_runlist_interleave = vgpu_fifo_set_runlist_interleave; | ||
842 | gops->fifo.channel_set_timeslice = vgpu_channel_set_timeslice; | ||
843 | gops->fifo.force_reset_ch = vgpu_fifo_force_reset_ch; | ||
844 | gops->fifo.init_engine_info = vgpu_fifo_init_engine_info; | ||
845 | gops->fifo.default_timeslice_us = vgpu_fifo_default_timeslice_us; | ||
846 | } | ||