summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.c48
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.h6
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.c79
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.h7
-rw-r--r--drivers/gpu/nvgpu/gk20a/tsg_gk20a.c19
-rw-r--r--drivers/gpu/nvgpu/gk20a/tsg_gk20a.h2
-rw-r--r--drivers/gpu/nvgpu/vgpu/fifo_vgpu.c9
-rw-r--r--include/uapi/linux/nvgpu.h30
8 files changed, 172 insertions, 28 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 9796d7c6..713c7737 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -328,18 +328,9 @@ static int channel_gk20a_setup_userd(struct channel_gk20a *c)
328static void channel_gk20a_bind(struct channel_gk20a *c) 328static void channel_gk20a_bind(struct channel_gk20a *c)
329{ 329{
330 struct gk20a *g = c->g; 330 struct gk20a *g = c->g;
331 struct fifo_gk20a *f = &g->fifo;
332 u32 engine_id;
333 struct fifo_engine_info_gk20a *engine_info = NULL;
334 u32 inst_ptr = gk20a_mm_inst_block_addr(g, &c->inst_block) 331 u32 inst_ptr = gk20a_mm_inst_block_addr(g, &c->inst_block)
335 >> ram_in_base_shift_v(); 332 >> ram_in_base_shift_v();
336 333
337 /* TODO:Need to handle non GR engine channel bind path */
338 engine_id = gk20a_fifo_get_gr_engine_id(g);
339
340 /* Consider 1st available GR engine */
341 engine_info = (f->engine_info + engine_id);
342
343 gk20a_dbg_info("bind channel %d inst ptr 0x%08x", 334 gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
344 c->hw_chid, inst_ptr); 335 c->hw_chid, inst_ptr);
345 336
@@ -348,7 +339,7 @@ static void channel_gk20a_bind(struct channel_gk20a *c)
348 gk20a_writel(g, ccsr_channel_r(c->hw_chid), 339 gk20a_writel(g, ccsr_channel_r(c->hw_chid),
349 (gk20a_readl(g, ccsr_channel_r(c->hw_chid)) & 340 (gk20a_readl(g, ccsr_channel_r(c->hw_chid)) &
350 ~ccsr_channel_runlist_f(~0)) | 341 ~ccsr_channel_runlist_f(~0)) |
351 ccsr_channel_runlist_f(engine_info->runlist_id)); 342 ccsr_channel_runlist_f(c->runlist_id));
352 343
353 gk20a_writel(g, ccsr_channel_inst_r(c->hw_chid), 344 gk20a_writel(g, ccsr_channel_inst_r(c->hw_chid),
354 ccsr_channel_inst_ptr_f(inst_ptr) | 345 ccsr_channel_inst_ptr_f(inst_ptr) |
@@ -401,7 +392,7 @@ void channel_gk20a_free_inst(struct gk20a *g, struct channel_gk20a *ch)
401 392
402static int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add) 393static int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add)
403{ 394{
404 return c->g->ops.fifo.update_runlist(c->g, 0, c->hw_chid, add, true); 395 return c->g->ops.fifo.update_runlist(c->g, c->runlist_id, c->hw_chid, add, true);
405} 396}
406 397
407void channel_gk20a_enable(struct channel_gk20a *ch) 398void channel_gk20a_enable(struct channel_gk20a *ch)
@@ -715,7 +706,7 @@ static int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
715 break; 706 break;
716 } 707 }
717 708
718 return ret ? ret : g->ops.fifo.update_runlist(g, 0, ~0, true, true); 709 return ret ? ret : g->ops.fifo.update_runlist(g, ch->runlist_id, ~0, true, true);
719} 710}
720 711
721static int gk20a_init_error_notifier(struct channel_gk20a *ch, 712static int gk20a_init_error_notifier(struct channel_gk20a *ch,
@@ -1102,7 +1093,7 @@ struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g,
1102 void (*update_fn)(struct channel_gk20a *, void *), 1093 void (*update_fn)(struct channel_gk20a *, void *),
1103 void *update_fn_data) 1094 void *update_fn_data)
1104{ 1095{
1105 struct channel_gk20a *ch = gk20a_open_new_channel(g); 1096 struct channel_gk20a *ch = gk20a_open_new_channel(g, -1);
1106 1097
1107 if (ch) { 1098 if (ch) {
1108 spin_lock(&ch->update_fn_lock); 1099 spin_lock(&ch->update_fn_lock);
@@ -1114,11 +1105,16 @@ struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g,
1114 return ch; 1105 return ch;
1115} 1106}
1116 1107
1117struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g) 1108struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g, s32 runlist_id)
1118{ 1109{
1119 struct fifo_gk20a *f = &g->fifo; 1110 struct fifo_gk20a *f = &g->fifo;
1120 struct channel_gk20a *ch; 1111 struct channel_gk20a *ch;
1121 1112
1113 /* compatibility with existing code */
1114 if (!gk20a_fifo_is_valid_runlist_id(g, runlist_id)) {
1115 runlist_id = gk20a_fifo_get_gr_runlist_id(g);
1116 }
1117
1122 gk20a_dbg_fn(""); 1118 gk20a_dbg_fn("");
1123 1119
1124 ch = allocate_channel(f); 1120 ch = allocate_channel(f);
@@ -1133,6 +1129,9 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g)
1133 BUG_ON(ch->g); 1129 BUG_ON(ch->g);
1134 ch->g = g; 1130 ch->g = g;
1135 1131
1132 /* Runlist for the channel */
1133 ch->runlist_id = runlist_id;
1134
1136 if (g->ops.fifo.alloc_inst(g, ch)) { 1135 if (g->ops.fifo.alloc_inst(g, ch)) {
1137 ch->g = NULL; 1136 ch->g = NULL;
1138 free_channel(f, ch); 1137 free_channel(f, ch);
@@ -1184,7 +1183,8 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g)
1184 return ch; 1183 return ch;
1185} 1184}
1186 1185
1187static int __gk20a_channel_open(struct gk20a *g, struct file *filp) 1186/* note: runlist_id -1 is synonym for the ENGINE_GR_GK20A runlist id */
1187static int __gk20a_channel_open(struct gk20a *g, struct file *filp, s32 runlist_id)
1188{ 1188{
1189 int err; 1189 int err;
1190 struct channel_gk20a *ch; 1190 struct channel_gk20a *ch;
@@ -1198,7 +1198,7 @@ static int __gk20a_channel_open(struct gk20a *g, struct file *filp)
1198 gk20a_err(dev_from_gk20a(g), "failed to power on, %d", err); 1198 gk20a_err(dev_from_gk20a(g), "failed to power on, %d", err);
1199 return err; 1199 return err;
1200 } 1200 }
1201 ch = gk20a_open_new_channel(g); 1201 ch = gk20a_open_new_channel(g, runlist_id);
1202 gk20a_idle(g->dev); 1202 gk20a_idle(g->dev);
1203 if (!ch) { 1203 if (!ch) {
1204 gk20a_err(dev_from_gk20a(g), 1204 gk20a_err(dev_from_gk20a(g),
@@ -1220,7 +1220,7 @@ int gk20a_channel_open(struct inode *inode, struct file *filp)
1220 int ret; 1220 int ret;
1221 1221
1222 gk20a_dbg_fn("start"); 1222 gk20a_dbg_fn("start");
1223 ret = __gk20a_channel_open(g, filp); 1223 ret = __gk20a_channel_open(g, filp, -1);
1224 1224
1225 gk20a_dbg_fn("end"); 1225 gk20a_dbg_fn("end");
1226 return ret; 1226 return ret;
@@ -1233,6 +1233,7 @@ int gk20a_channel_open_ioctl(struct gk20a *g,
1233 int fd; 1233 int fd;
1234 struct file *file; 1234 struct file *file;
1235 char *name; 1235 char *name;
1236 s32 runlist_id = args->in.runlist_id;
1236 1237
1237 err = get_unused_fd_flags(O_RDWR); 1238 err = get_unused_fd_flags(O_RDWR);
1238 if (err < 0) 1239 if (err < 0)
@@ -1253,12 +1254,12 @@ int gk20a_channel_open_ioctl(struct gk20a *g,
1253 goto clean_up; 1254 goto clean_up;
1254 } 1255 }
1255 1256
1256 err = __gk20a_channel_open(g, file); 1257 err = __gk20a_channel_open(g, file, runlist_id);
1257 if (err) 1258 if (err)
1258 goto clean_up_file; 1259 goto clean_up_file;
1259 1260
1260 fd_install(fd, file); 1261 fd_install(fd, file);
1261 args->channel_fd = fd; 1262 args->out.channel_fd = fd;
1262 return 0; 1263 return 0;
1263 1264
1264clean_up_file: 1265clean_up_file:
@@ -2780,6 +2781,7 @@ int gk20a_channel_suspend(struct gk20a *g)
2780 u32 chid; 2781 u32 chid;
2781 bool channels_in_use = false; 2782 bool channels_in_use = false;
2782 int err; 2783 int err;
2784 u32 active_runlist_ids = 0;
2783 2785
2784 gk20a_dbg_fn(""); 2786 gk20a_dbg_fn("");
2785 2787
@@ -2803,12 +2805,14 @@ int gk20a_channel_suspend(struct gk20a *g)
2803 2805
2804 channels_in_use = true; 2806 channels_in_use = true;
2805 2807
2808 active_runlist_ids |= BIT(ch->runlist_id);
2809
2806 gk20a_channel_put(ch); 2810 gk20a_channel_put(ch);
2807 } 2811 }
2808 } 2812 }
2809 2813
2810 if (channels_in_use) { 2814 if (channels_in_use) {
2811 g->ops.fifo.update_runlist(g, 0, ~0, false, true); 2815 gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, false, true);
2812 2816
2813 for (chid = 0; chid < f->num_channels; chid++) { 2817 for (chid = 0; chid < f->num_channels; chid++) {
2814 if (gk20a_channel_get(&f->channel[chid])) { 2818 if (gk20a_channel_get(&f->channel[chid])) {
@@ -2827,6 +2831,7 @@ int gk20a_channel_resume(struct gk20a *g)
2827 struct fifo_gk20a *f = &g->fifo; 2831 struct fifo_gk20a *f = &g->fifo;
2828 u32 chid; 2832 u32 chid;
2829 bool channels_in_use = false; 2833 bool channels_in_use = false;
2834 u32 active_runlist_ids = 0;
2830 2835
2831 gk20a_dbg_fn(""); 2836 gk20a_dbg_fn("");
2832 2837
@@ -2835,12 +2840,13 @@ int gk20a_channel_resume(struct gk20a *g)
2835 gk20a_dbg_info("resume channel %d", chid); 2840 gk20a_dbg_info("resume channel %d", chid);
2836 g->ops.fifo.bind_channel(&f->channel[chid]); 2841 g->ops.fifo.bind_channel(&f->channel[chid]);
2837 channels_in_use = true; 2842 channels_in_use = true;
2843 active_runlist_ids |= BIT(f->channel[chid].runlist_id);
2838 gk20a_channel_put(&f->channel[chid]); 2844 gk20a_channel_put(&f->channel[chid]);
2839 } 2845 }
2840 } 2846 }
2841 2847
2842 if (channels_in_use) 2848 if (channels_in_use)
2843 g->ops.fifo.update_runlist(g, 0, ~0, true, true); 2849 gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, true, true);
2844 2850
2845 gk20a_dbg_fn("done"); 2851 gk20a_dbg_fn("done");
2846 return 0; 2852 return 0;
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index 4e0db3cf..d8528984 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -198,6 +198,8 @@ struct channel_gk20a {
198 struct work_struct update_fn_work; 198 struct work_struct update_fn_work;
199 199
200 u32 interleave_level; 200 u32 interleave_level;
201
202 u32 runlist_id;
201}; 203};
202 204
203static inline bool gk20a_channel_as_bound(struct channel_gk20a *ch) 205static inline bool gk20a_channel_as_bound(struct channel_gk20a *ch)
@@ -250,7 +252,9 @@ void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller);
250#define gk20a_channel_put(ch) _gk20a_channel_put(ch, __func__) 252#define gk20a_channel_put(ch) _gk20a_channel_put(ch, __func__)
251 253
252int gk20a_wait_channel_idle(struct channel_gk20a *ch); 254int gk20a_wait_channel_idle(struct channel_gk20a *ch);
253struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g); 255
256/* runlist_id -1 is a synonym for the ENGINE_GR_GK20A runlist id */
257struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g, s32 runlist_id);
254struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g, 258struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g,
255 void (*update_fn)(struct channel_gk20a *, void *), 259 void (*update_fn)(struct channel_gk20a *, void *),
256 void *update_fn_data); 260 void *update_fn_data);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 766ea749..ab06b4f9 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -165,6 +165,61 @@ u32 gk20a_fifo_get_all_ce_engine_reset_mask(struct gk20a *g)
165 return reset_mask; 165 return reset_mask;
166} 166}
167 167
168u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g)
169{
170	u32 gr_engine_cnt = 0;
171	u32 gr_engine_id = FIFO_INVAL_ENGINE_ID;
172	struct fifo_engine_info_gk20a *engine_info;
173	u32 gr_runlist_id = ~0; /* ~0 = invalid, returned when no GR engine/info is found */
174
175	/* Consider 1st available GR engine */
176	gr_engine_cnt = gk20a_fifo_get_engine_ids(g, &gr_engine_id,
177			1, ENGINE_GR_GK20A);
178
179	if (!gr_engine_cnt) {
180		gk20a_err(dev_from_gk20a(g),
181			"No GR engine available on this device!");
182		goto end;
183	}
184
185	engine_info = gk20a_fifo_get_engine_info(g, gr_engine_id);
186
187	if (engine_info) {
188		gr_runlist_id = engine_info->runlist_id;
189	} else {
190		gk20a_err(dev_from_gk20a(g),
191			"gr_engine_id is not in active list/invalid %d", gr_engine_id);
192	}
193
194end:
195	return gr_runlist_id;
196}
197
198bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id)
199{
200	struct fifo_gk20a *f = NULL;
201	u32 engine_id_idx;
202	u32 active_engine_id;
203	struct fifo_engine_info_gk20a *engine_info;
204
205	if (!g)
206		return false;
207
208	f = &g->fifo;
209
210	for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
211		active_engine_id = f->active_engines_list[engine_id_idx];
212		engine_info = gk20a_fifo_get_engine_info(g, active_engine_id);
213		if (engine_info && (engine_info->runlist_id == runlist_id)) {
214			return true;
215		}
216	}
217
218	gk20a_err(dev_from_gk20a(g), "runlist_id is not in active list/invalid %d", runlist_id);
219
220	return false;
221}
222
168/* 223/*
169 * Link engine IDs to MMU IDs and vice versa. 224 * Link engine IDs to MMU IDs and vice versa.
170 */ 225 */
@@ -2736,6 +2791,30 @@ clean_up:
2736 return ret; 2791 return ret;
2737} 2792}
2738 2793
2794int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 hw_chid,
2795		bool add, bool wait_for_finish)
2796{
2797	int ret = -EINVAL; /* negative errno; must not be stored in an unsigned type */
2798	unsigned long runlist_id = 0;
2799	unsigned long ids = runlist_ids; /* for_each_set_bit() walks unsigned long words */
2800	int errcode;
2801	if (!g)
2802		goto end;
2803
2804	ret = 0;
2805	for_each_set_bit(runlist_id, &ids, 32) {
2806		/* Capture the last failure error code */
2807		errcode = g->ops.fifo.update_runlist(g, runlist_id, hw_chid, add, wait_for_finish);
2808		if (errcode) {
2809			gk20a_err(dev_from_gk20a(g),
2810				"failed to update_runlist %lu %d", runlist_id, errcode);
2811			ret = errcode;
2812		}
2813	}
2814end:
2815	return ret;
2816}
2817
2739/* add/remove a channel from runlist 2818/* add/remove a channel from runlist
2740 special cases below: runlist->active_channels will NOT be changed. 2819 special cases below: runlist->active_channels will NOT be changed.
2741 (hw_chid == ~0 && !add) means remove all active channels from runlist. 2820 (hw_chid == ~0 && !add) means remove all active channels from runlist.
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 25d2cd9f..e6ae0bdc 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -243,4 +243,11 @@ bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id);
243u32 gk20a_fifo_get_gr_engine_id(struct gk20a *g); 243u32 gk20a_fifo_get_gr_engine_id(struct gk20a *g);
244 244
245u32 gk20a_fifo_get_all_ce_engine_reset_mask(struct gk20a *g); 245u32 gk20a_fifo_get_all_ce_engine_reset_mask(struct gk20a *g);
246
247u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g);
248
249bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id);
250
251int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 hw_chid,
252 bool add, bool wait_for_finish);
246#endif /*__GR_GK20A_H__*/ 253#endif /*__GR_GK20A_H__*/
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index f7d5535d..603ed6f5 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -114,6 +114,16 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
114 114
115 ch->tsgid = tsg->tsgid; 115 ch->tsgid = tsg->tsgid;
116 116
117	/* all channels in a TSG must share the same runlist_id */
118 if (tsg->runlist_id == ~0)
119 tsg->runlist_id = ch->runlist_id;
120 else if (tsg->runlist_id != ch->runlist_id) {
121 gk20a_err(dev_from_gk20a(tsg->g),
122			"Error: TSG channels must share the same runlist ch[%d] tsg[%d]\n",
123 ch->runlist_id, tsg->runlist_id);
124 return -EINVAL;
125 }
126
117 mutex_lock(&tsg->ch_list_lock); 127 mutex_lock(&tsg->ch_list_lock);
118 list_add_tail(&ch->ch_entry, &tsg->ch_list); 128 list_add_tail(&ch->ch_entry, &tsg->ch_list);
119 mutex_unlock(&tsg->ch_list_lock); 129 mutex_unlock(&tsg->ch_list_lock);
@@ -185,7 +195,7 @@ static int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg,
185 gk20a_channel_get_timescale_from_timeslice(g, tsg->timeslice_us, 195 gk20a_channel_get_timescale_from_timeslice(g, tsg->timeslice_us,
186 &tsg->timeslice_timeout, &tsg->timeslice_scale); 196 &tsg->timeslice_timeout, &tsg->timeslice_scale);
187 197
188 g->ops.fifo.update_runlist(g, 0, ~0, true, true); 198 g->ops.fifo.update_runlist(g, tsg->runlist_id, ~0, true, true);
189 199
190 return 0; 200 return 0;
191} 201}
@@ -346,7 +356,7 @@ static int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
346 break; 356 break;
347 } 357 }
348 358
349 return ret ? ret : g->ops.fifo.update_runlist(g, 0, ~0, true, true); 359 return ret ? ret : g->ops.fifo.update_runlist(g, tsg->runlist_id, ~0, true, true);
350} 360}
351 361
352static int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice) 362static int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
@@ -360,7 +370,7 @@ static int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
360 gk20a_channel_get_timescale_from_timeslice(g, timeslice, 370 gk20a_channel_get_timescale_from_timeslice(g, timeslice,
361 &tsg->timeslice_timeout, &tsg->timeslice_scale); 371 &tsg->timeslice_timeout, &tsg->timeslice_scale);
362 372
363 return g->ops.fifo.update_runlist(g, 0, ~0, true, true); 373 return g->ops.fifo.update_runlist(g, tsg->runlist_id, ~0, true, true);
364} 374}
365 375
366static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg) 376static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg)
@@ -411,6 +421,7 @@ int gk20a_tsg_open(struct gk20a *g, struct file *filp)
411 tsg->timeslice_us = 0; 421 tsg->timeslice_us = 0;
412 tsg->timeslice_timeout = 0; 422 tsg->timeslice_timeout = 0;
413 tsg->timeslice_scale = 0; 423 tsg->timeslice_scale = 0;
424 tsg->runlist_id = ~0;
414 425
415 filp->private_data = tsg; 426 filp->private_data = tsg;
416 427
@@ -448,6 +459,8 @@ static void gk20a_tsg_release(struct kref *ref)
448 459
449 release_used_tsg(&g->fifo, tsg); 460 release_used_tsg(&g->fifo, tsg);
450 461
462 tsg->runlist_id = ~0;
463
451 gk20a_dbg(gpu_dbg_fn, "tsg released %d\n", tsg->tsgid); 464 gk20a_dbg(gpu_dbg_fn, "tsg released %d\n", tsg->tsgid);
452} 465}
453 466
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
index 14ead5c0..57414690 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
@@ -54,6 +54,8 @@ struct tsg_gk20a {
54 54
55 struct list_head event_id_list; 55 struct list_head event_id_list;
56 struct mutex event_id_list_lock; 56 struct mutex event_id_list_lock;
57
58 u32 runlist_id;
57}; 59};
58 60
59int gk20a_enable_tsg(struct tsg_gk20a *tsg); 61int gk20a_enable_tsg(struct tsg_gk20a *tsg);
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 9591c72a..11f389fb 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -175,6 +175,8 @@ static int init_engine_info(struct fifo_gk20a *f)
175 175
176 /* FIXME: retrieve this from server */ 176 /* FIXME: retrieve this from server */
177 gr_info->runlist_id = 0; 177 gr_info->runlist_id = 0;
178 f->active_engines_list[0] = gr_sw_id;
179
178 return 0; 180 return 0;
179} 181}
180 182
@@ -281,11 +283,14 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
281 f->tsg = vzalloc(f->num_channels * sizeof(*f->tsg)); 283 f->tsg = vzalloc(f->num_channels * sizeof(*f->tsg));
282 f->engine_info = kzalloc(f->max_engines * sizeof(*f->engine_info), 284 f->engine_info = kzalloc(f->max_engines * sizeof(*f->engine_info),
283 GFP_KERNEL); 285 GFP_KERNEL);
286 f->active_engines_list = kzalloc(f->max_engines * sizeof(u32),
287 GFP_KERNEL);
284 288
285 if (!(f->channel && f->tsg && f->engine_info)) { 289 if (!(f->channel && f->tsg && f->engine_info && f->active_engines_list)) {
286 err = -ENOMEM; 290 err = -ENOMEM;
287 goto clean_up; 291 goto clean_up;
288 } 292 }
293 memset(f->active_engines_list, 0xff, (f->max_engines * sizeof(u32)));
289 294
290 init_engine_info(f); 295 init_engine_info(f);
291 296
@@ -327,6 +332,8 @@ clean_up:
327 f->tsg = NULL; 332 f->tsg = NULL;
328 kfree(f->engine_info); 333 kfree(f->engine_info);
329 f->engine_info = NULL; 334 f->engine_info = NULL;
335 kfree(f->active_engines_list);
336 f->active_engines_list = NULL;
330 337
331 return err; 338 return err;
332} 339}
diff --git a/include/uapi/linux/nvgpu.h b/include/uapi/linux/nvgpu.h
index 9d649536..992355d9 100644
--- a/include/uapi/linux/nvgpu.h
+++ b/include/uapi/linux/nvgpu.h
@@ -298,7 +298,20 @@ struct nvgpu_gpu_get_tpc_masks_args {
298}; 298};
299 299
300struct nvgpu_gpu_open_channel_args { 300struct nvgpu_gpu_open_channel_args {
301 __s32 channel_fd; 301 union {
302 __s32 channel_fd; /* deprecated: use out.channel_fd instead */
303 struct {
304		/* runlist_id selects the runlist (and thus the
305		 * target engine or engines) for which the
306		 * channel is opened. A runlist_id of -1 is a
307		 * synonym for the primary graphics
308		 * runlist. */
309 __s32 runlist_id;
310 } in;
311 struct {
312 __s32 channel_fd;
313 } out;
314 };
302}; 315};
303 316
304/* L2 cache writeback, optionally invalidate clean lines and flush fb */ 317/* L2 cache writeback, optionally invalidate clean lines and flush fb */
@@ -820,7 +833,20 @@ struct nvgpu_get_param_args {
820} __packed; 833} __packed;
821 834
822struct nvgpu_channel_open_args { 835struct nvgpu_channel_open_args {
823 __s32 channel_fd; 836 union {
837 __s32 channel_fd; /* deprecated: use out.channel_fd instead */
838 struct {
839		/* runlist_id selects the runlist (and thus the
840		 * target engine or engines) for which the
841		 * channel is opened. A runlist_id of -1 is a
842		 * synonym for the primary graphics
843		 * runlist. */
844 __s32 runlist_id;
845 } in;
846 struct {
847 __s32 channel_fd;
848 } out;
849 };
824}; 850};
825 851
826struct nvgpu_set_nvmap_fd_args { 852struct nvgpu_set_nvmap_fd_args {