path: root/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-12-27 16:04:17 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-01-02 16:53:09 -0500
commit	86691b59c6fae2b091855c0f4d44079cad8529b1 (patch)
tree	c9fd2914ed052771980298f5e77bd43d7c430ec9	/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
parent	14fa8207e2f7feb36f8fa284b94859e05136eb0f (diff)
gpu: nvgpu: Remove bare channel scheduling

Remove scheduling IOCTL implementations for bare channels. Also removes
code that constructs bare channels in runlist.

Bug 1842197

Change-Id: I6e833b38e24a2f2c45c7993edf939d365eaf41f0
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1627326
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
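
In this driver a "bare" channel is one that has not been bound to a TSG (time-slice group); the runlist code removed below skipped TSG-bound channels with gk20a_is_channel_marked_as_tsg(). A minimal sketch of that distinction, using only that helper from the diff (channel_is_bare() is a hypothetical wrapper for clarity, not driver code):

	/* Hypothetical illustration: "bare" means not bound to a TSG.
	 * gk20a_is_channel_marked_as_tsg() is the helper used by the code
	 * removed in this patch; channel_is_bare() exists only for clarity. */
	static bool channel_is_bare(struct channel_gk20a *ch)
	{
		return !gk20a_is_channel_marked_as_tsg(ch);
	}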
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	90
1 file changed, 2 insertions(+), 88 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index fc71e907..194d5e3c 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -3075,48 +3075,11 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 	bool last_level = cur_level == NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH;
 	struct channel_gk20a *ch;
 	bool skip_next = false;
-	u32 chid, tsgid, count = 0;
+	u32 tsgid, count = 0;
 	u32 runlist_entry_words = f->runlist_entry_size / sizeof(u32);
 
 	gk20a_dbg_fn("");
 
-	/* for each bare channel, CH, on this level, insert all higher-level
-	   channels and TSGs before inserting CH. */
-	for_each_set_bit(chid, runlist->active_channels, f->num_channels) {
-		ch = &f->channel[chid];
-
-		if (ch->interleave_level != cur_level)
-			continue;
-
-		if (gk20a_is_channel_marked_as_tsg(ch))
-			continue;
-
-		if (!last_level && !skip_next) {
-			runlist_entry = gk20a_runlist_construct_locked(f,
-					runlist,
-					cur_level + 1,
-					runlist_entry,
-					interleave_enabled,
-					false,
-					entries_left);
-			/* if interleaving is disabled, higher-level channels
-			   and TSGs only need to be inserted once */
-			if (!interleave_enabled)
-				skip_next = true;
-		}
-
-		if (!(*entries_left))
-			return NULL;
-
-		gk20a_dbg_info("add channel %d to runlist", chid);
-		f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry);
-		gk20a_dbg_info("run list count %d runlist [0] %x [1] %x\n",
-			count, runlist_entry[0], runlist_entry[1]);
-		runlist_entry += runlist_entry_words;
-		count++;
-		(*entries_left)--;
-	}
-
 	/* for each TSG, T, on this level, insert all higher-level channels
 	   and TSGs before inserting T. */
 	for_each_set_bit(tsgid, runlist->active_tsgs, f->num_channels) {
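
The TSG loop that remains uses the same recursive interleave pattern the deleted bare-channel loop used: before each entry at the current level, all entries of the higher levels are inserted again, and when interleaving is disabled the skip_next flag makes that happen only once. A standalone sketch of the interleave-enabled case, with simplified data and names (not driver code):

	/* Standalone illustration of the interleave recursion: before every
	 * item at the current level, emit all higher-level items again, so
	 * higher-priority work appears more often in the runlist. */
	#include <stdio.h>

	#define NUM_LEVELS 3	/* LOW, MEDIUM, HIGH */

	static const char *items[NUM_LEVELS][2] = {
		{ "tsg-low-0",  "tsg-low-1" },
		{ "tsg-med-0",  NULL },
		{ "tsg-high-0", NULL },
	};

	static void build_level(int level)
	{
		int i;

		for (i = 0; i < 2 && items[level][i]; i++) {
			if (level + 1 < NUM_LEVELS)
				build_level(level + 1);	/* higher levels first */
			printf("add %s\n", items[level][i]);
		}
	}

	int main(void)
	{
		build_level(0);	/* start from the lowest interleave level */
		return 0;
	}

With the data above this prints the high- and medium-level entries before each low-level one, which is the runlist shape the driver builds when interleaving is enabled.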
@@ -3204,16 +3167,12 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 
 int gk20a_fifo_set_runlist_interleave(struct gk20a *g,
 		u32 id,
-		bool is_tsg,
 		u32 runlist_id,
 		u32 new_level)
 {
 	gk20a_dbg_fn("");
 
-	if (is_tsg)
-		g->fifo.tsg[id].interleave_level = new_level;
-	else
-		g->fifo.channel[id].interleave_level = new_level;
+	g->fifo.tsg[id].interleave_level = new_level;
 
 	return 0;
 }
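
With the is_tsg parameter gone, the id passed to gk20a_fifo_set_runlist_interleave() can only be a TSG id. A hypothetical call site under that assumption (the surrounding error handling is a sketch, not taken from this patch):

	/* Hypothetical caller: raise one TSG to the HIGH interleave level.
	 * tsgid and runlist_id are assumed to be known to the caller. */
	int err = gk20a_fifo_set_runlist_interleave(g, tsgid, runlist_id,
			NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH);
	if (err)
		nvgpu_err(g, "failed to set interleave level for TSG %u", tsgid);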
@@ -3917,51 +3876,6 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
 	return gk20a_fifo_commit_userd(c);
 }
 
-static int channel_gk20a_set_schedule_params(struct channel_gk20a *c)
-{
-	int shift = 0, value = 0;
-
-	gk20a_channel_get_timescale_from_timeslice(c->g,
-		c->timeslice_us, &value, &shift);
-
-	/* disable channel */
-	c->g->ops.fifo.disable_channel(c);
-
-	/* preempt the channel */
-	WARN_ON(c->g->ops.fifo.preempt_channel(c->g, c->chid));
-
-	/* set new timeslice */
-	nvgpu_mem_wr32(c->g, &c->inst_block, ram_fc_runlist_timeslice_w(),
-		value | (shift << 12) |
-		fifo_runlist_timeslice_enable_true_f());
-
-	/* enable channel */
-	c->g->ops.fifo.enable_channel(c);
-
-	return 0;
-}
-
-int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
-{
-	struct gk20a *g = ch->g;
-
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		nvgpu_err(g, "invalid operation for TSG!");
-		return -EINVAL;
-	}
-
-	if (timeslice < g->min_timeslice_us ||
-		timeslice > g->max_timeslice_us)
-		return -EINVAL;
-
-	ch->timeslice_us = timeslice;
-
-	gk20a_dbg(gpu_dbg_sched, "chid=%u timeslice=%u us",
-		ch->chid, timeslice);
-
-	return channel_gk20a_set_schedule_params(ch);
-}
-
 void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c)
 {
 	struct gk20a *g = c->g;
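
The deleted channel_gk20a_set_schedule_params() programmed the per-channel RAMFC runlist timeslice word as value | (shift << 12) | enable, where value and shift come from gk20a_channel_get_timescale_from_timeslice() and the effective timeslice scales roughly as value << shift. A standalone sketch of that packing; the enable bit position is an assumption for illustration only (the driver takes it from fifo_runlist_timeslice_enable_true_f() in the generated hardware headers):

	#include <stdint.h>

	/* Sketch of the removed timeslice encoding. Bit 28 as the enable bit
	 * is an assumption here; the real value comes from the generated
	 * hardware headers via fifo_runlist_timeslice_enable_true_f(). */
	#define SKETCH_TIMESLICE_ENABLE	(1u << 28)

	static uint32_t pack_runlist_timeslice(uint32_t value, uint32_t shift)
	{
		return value | (shift << 12) | SKETCH_TIMESLICE_ENABLE;
	}

The per-channel callers of this path are gone with this patch; the TSG-level scheduling paths elsewhere in the driver are what remain.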