author	Thomas Fleury <tfleury@nvidia.com>	2016-06-24 11:52:18 -0400
committer	Vijayakumar Subbu <vsubbu@nvidia.com>	2016-07-19 02:17:55 -0400
commit	d6efa8fc625fc297b9baa0755407cee1180fbbe6 (patch)
tree	5bf0f7b53579afc1fb16130ec2b54be8fba6cbf1
parent	c8ffe0fdecfa110a9f9beb1b7e0298d3c3c64cc2 (diff)
gpu: nvgpu: add ref counting for GPU sched ctrl
Replace the f->tsg_inuse_mutex/in_use checks in the sched ctrl ioctls with
TSG reference counting: each ioctl takes a temporary reference with
kref_get_unless_zero() and drops it with kref_put() when done. Also add
NVGPU_SCHED_IOCTL_GET_TSG and NVGPU_SCHED_IOCTL_PUT_TSG so userspace can
hold a reference on a TSG across calls (references still held are released
on device close), and NVGPU_SCHED_IOCTL_GET_API_VERSION to report the
sched API version.

Jira VFND-1968

Change-Id: Id84c5732e312e44db3d412df5c21e429227dd7fa
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: http://git-master/r/1171286
(cherry picked from commit 13a3a4355914635ed175708affef17dc8ef0b133)
Reviewed-on: http://git-master/r/1177824
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Richard Zhao <rizhao@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
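For context, a minimal userspace sketch of how the new ioctls are meant to
be used. This is a hedged illustration, not part of the patch: the sched
device node path is an assumption; the ioctl names and argument structs are
the ones added to include/uapi/linux/nvgpu.h below.

	/*
	 * Sketch only: take and drop a reference on a TSG through the new
	 * sched ctrl ioctls. The device path is assumed, not from the patch.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/nvgpu.h>

	int main(void)
	{
		struct nvgpu_sched_api_version_args ver = { 0 };
		struct nvgpu_sched_tsg_refcount_args ref = { .tsgid = 0 };
		int fd = open("/dev/nvgpu/gpu0/sched", O_RDWR); /* assumed path */

		if (fd < 0)
			return 1;

		if (!ioctl(fd, NVGPU_SCHED_IOCTL_GET_API_VERSION, &ver))
			printf("sched API version %u\n", ver.version);

		/* pin TSG 0 so it cannot be freed and its id recycled */
		if (!ioctl(fd, NVGPU_SCHED_IOCTL_GET_TSG, &ref)) {
			/* ... safely query/tune the TSG here ... */
			ioctl(fd, NVGPU_SCHED_IOCTL_PUT_TSG, &ref);
		}

		/* references still held are dropped by gk20a_sched_dev_release() */
		close(fd);
		return 0;
	}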
-rw-r--r--	drivers/gpu/nvgpu/gk20a/sched_gk20a.c	195
-rw-r--r--	drivers/gpu/nvgpu/gk20a/sched_gk20a.h	1
-rw-r--r--	drivers/gpu/nvgpu/gk20a/tsg_gk20a.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/tsg_gk20a.h	1
-rw-r--r--	include/uapi/linux/nvgpu.h	22
5 files changed, 157 insertions(+), 66 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
index bcbbbe8b..3d7e8bd7 100644
--- a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
@@ -105,8 +105,6 @@ static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched,
 		mutex_unlock(&sched->status_lock);
 		return -EFAULT;
 	}
-
-	memset(sched->recent_tsg_bitmap, 0, sched->bitmap_size);
 	mutex_unlock(&sched->status_lock);
 
 	return 0;
@@ -159,13 +157,15 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
 	if (!bitmap)
 		return -ENOMEM;
 
-	mutex_lock(&f->tsg_inuse_mutex);
+	mutex_lock(&sched->status_lock);
 	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
-		tsg = &f->tsg[tsgid];
-		if ((tsg->in_use) && (tsg->tgid == tgid))
-			NVGPU_SCHED_SET(tsgid, bitmap);
+		if (NVGPU_SCHED_ISSET(tsgid, sched->active_tsg_bitmap)) {
+			tsg = &f->tsg[tsgid];
+			if (tsg->tgid == tgid)
+				NVGPU_SCHED_SET(tsgid, bitmap);
+		}
 	}
-	mutex_unlock(&f->tsg_inuse_mutex);
+	mutex_unlock(&sched->status_lock);
 
 	if (copy_to_user((void __user *)(uintptr_t)arg->buffer,
 		bitmap, sched->bitmap_size))
@@ -183,23 +183,15 @@ static int gk20a_sched_dev_ioctl_get_params(struct gk20a_sched_ctrl *sched,
 	struct fifo_gk20a *f = &g->fifo;
 	struct tsg_gk20a *tsg;
 	u32 tsgid = arg->tsgid;
-	int err = -ENXIO;
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
 
 	if (tsgid >= f->num_channels)
 		return -EINVAL;
 
-	mutex_lock(&f->tsg_inuse_mutex);
 	tsg = &f->tsg[tsgid];
-	if (!tsg->in_use)
-		goto unlock_in_use;
-
-	mutex_lock(&sched->status_lock);
-	if (!NVGPU_SCHED_ISSET(tsgid, sched->active_tsg_bitmap)) {
-		gk20a_dbg(gpu_dbg_sched, "tsgid=%u not active", tsgid);
-		goto unlock_status;
-	}
+	if (!kref_get_unless_zero(&tsg->refcount))
+		return -ENXIO;
 
 	arg->pid = tsg->tgid;	/* kernel tgid corresponds to user pid */
 	arg->runlist_interleave = tsg->interleave_level;
@@ -215,15 +207,9 @@ static int gk20a_sched_dev_ioctl_get_params(struct gk20a_sched_ctrl *sched,
 		arg->compute_preempt_mode = 0;
 	}
 
-	err = 0;
-
-unlock_status:
-	mutex_unlock(&sched->status_lock);
-
-unlock_in_use:
-	mutex_unlock(&f->tsg_inuse_mutex);
+	kref_put(&tsg->refcount, gk20a_tsg_release);
 
-	return err;
+	return 0;
 }
 
 static int gk20a_sched_dev_ioctl_tsg_set_timeslice(
@@ -234,37 +220,27 @@ static int gk20a_sched_dev_ioctl_tsg_set_timeslice(
 	struct fifo_gk20a *f = &g->fifo;
 	struct tsg_gk20a *tsg;
 	u32 tsgid = arg->tsgid;
-	int err = -ENXIO;
+	int err;
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
 
 	if (tsgid >= f->num_channels)
 		return -EINVAL;
 
-	mutex_lock(&f->tsg_inuse_mutex);
 	tsg = &f->tsg[tsgid];
-	if (!tsg->in_use)
-		goto unlock_in_use;
-
-	mutex_lock(&sched->status_lock);
-	if (NVGPU_SCHED_ISSET(tsgid, sched->recent_tsg_bitmap)) {
-		gk20a_dbg(gpu_dbg_sched, "tsgid=%u was re-allocated", tsgid);
-		goto unlock_status;
-	}
+	if (!kref_get_unless_zero(&tsg->refcount))
+		return -ENXIO;
 
 	err = gk20a_busy(g->dev);
 	if (err)
-		goto unlock_status;
+		goto done;
 
 	err = gk20a_tsg_set_timeslice(tsg, arg->timeslice);
 
 	gk20a_idle(g->dev);
 
-unlock_status:
-	mutex_unlock(&sched->status_lock);
-
-unlock_in_use:
-	mutex_unlock(&f->tsg_inuse_mutex);
+done:
+	kref_put(&tsg->refcount, gk20a_tsg_release);
 
 	return err;
 }
@@ -277,37 +253,27 @@ static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave(
 	struct fifo_gk20a *f = &g->fifo;
 	struct tsg_gk20a *tsg;
 	u32 tsgid = arg->tsgid;
-	int err = -ENXIO;
+	int err;
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
 
 	if (tsgid >= f->num_channels)
 		return -EINVAL;
 
-	mutex_lock(&f->tsg_inuse_mutex);
 	tsg = &f->tsg[tsgid];
-	if (!tsg->in_use)
-		goto unlock_in_use;
-
-	mutex_lock(&sched->status_lock);
-	if (NVGPU_SCHED_ISSET(tsgid, sched->recent_tsg_bitmap)) {
-		gk20a_dbg(gpu_dbg_sched, "tsgid=%u was re-allocated", tsgid);
-		goto unlock_status;
-	}
+	if (!kref_get_unless_zero(&tsg->refcount))
+		return -ENXIO;
 
 	err = gk20a_busy(g->dev);
 	if (err)
-		goto unlock_status;
+		goto done;
 
 	err = gk20a_tsg_set_runlist_interleave(tsg, arg->runlist_interleave);
 
 	gk20a_idle(g->dev);
 
-unlock_status:
-	mutex_unlock(&sched->status_lock);
-
-unlock_in_use:
-	mutex_unlock(&f->tsg_inuse_mutex);
+done:
+	kref_put(&tsg->refcount, gk20a_tsg_release);
 
 	return err;
 }
@@ -332,6 +298,80 @@ static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched)
 	return 0;
 }
 
+static int gk20a_sched_dev_ioctl_get_api_version(struct gk20a_sched_ctrl *sched,
+	struct nvgpu_sched_api_version_args *args)
+{
+	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");
+
+	args->version = NVGPU_SCHED_API_VERSION;
+	return 0;
+}
+
+static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched,
+	struct nvgpu_sched_tsg_refcount_args *arg)
+{
+	struct gk20a *g = sched->g;
+	struct fifo_gk20a *f = &g->fifo;
+	struct tsg_gk20a *tsg;
+	u32 tsgid = arg->tsgid;
+
+	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
+
+	if (tsgid >= f->num_channels)
+		return -EINVAL;
+
+	tsg = &f->tsg[tsgid];
+	if (!kref_get_unless_zero(&tsg->refcount))
+		return -ENXIO;
+
+	mutex_lock(&sched->status_lock);
+	if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
+		gk20a_warn(dev_from_gk20a(g),
+			"tsgid=%d already referenced", tsgid);
+		/* unlock status_lock as gk20a_tsg_release locks it */
+		mutex_unlock(&sched->status_lock);
+		kref_put(&tsg->refcount, gk20a_tsg_release);
+		return -ENXIO;
+	}
+
+	/* keep reference on TSG, will be released on
+	 * NVGPU_SCHED_IOCTL_PUT_TSG ioctl, or close
+	 */
+	NVGPU_SCHED_SET(tsgid, sched->ref_tsg_bitmap);
+	mutex_unlock(&sched->status_lock);
+
+	return 0;
+}
+
+static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched,
+	struct nvgpu_sched_tsg_refcount_args *arg)
+{
+	struct gk20a *g = sched->g;
+	struct fifo_gk20a *f = &g->fifo;
+	struct tsg_gk20a *tsg;
+	u32 tsgid = arg->tsgid;
+
+	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
+
+	if (tsgid >= f->num_channels)
+		return -EINVAL;
+
+	mutex_lock(&sched->status_lock);
+	if (!NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
+		mutex_unlock(&sched->status_lock);
+		gk20a_warn(dev_from_gk20a(g),
+			"tsgid=%d not previously referenced", tsgid);
+		return -ENXIO;
+	}
+	NVGPU_SCHED_CLR(tsgid, sched->ref_tsg_bitmap);
+	mutex_unlock(&sched->status_lock);
+
+	tsg = &f->tsg[tsgid];
+	kref_put(&tsg->refcount, gk20a_tsg_release);
+
+	return 0;
+}
+
 int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
 {
 	struct gk20a *g = container_of(inode->i_cdev,
@@ -354,6 +394,7 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
 
 	memcpy(sched->recent_tsg_bitmap, sched->active_tsg_bitmap,
 		sched->bitmap_size);
+	memset(sched->ref_tsg_bitmap, 0, sched->bitmap_size);
 
 	filp->private_data = sched;
 	gk20a_dbg(gpu_dbg_sched, "filp=%p sched=%p", filp, sched);
@@ -414,6 +455,18 @@ long gk20a_sched_dev_ioctl(struct file *filp, unsigned int cmd,
 	case NVGPU_SCHED_IOCTL_UNLOCK_CONTROL:
 		err = gk20a_sched_dev_ioctl_unlock_control(sched);
 		break;
+	case NVGPU_SCHED_IOCTL_GET_API_VERSION:
+		err = gk20a_sched_dev_ioctl_get_api_version(sched,
+			(struct nvgpu_sched_api_version_args *)buf);
+		break;
+	case NVGPU_SCHED_IOCTL_GET_TSG:
+		err = gk20a_sched_dev_ioctl_get_tsg(sched,
+			(struct nvgpu_sched_tsg_refcount_args *)buf);
+		break;
+	case NVGPU_SCHED_IOCTL_PUT_TSG:
+		err = gk20a_sched_dev_ioctl_put_tsg(sched,
+			(struct nvgpu_sched_tsg_refcount_args *)buf);
+		break;
 	default:
 		dev_dbg(dev_from_gk20a(g), "unrecognized gpu ioctl cmd: 0x%x",
 			cmd);
@@ -436,9 +489,21 @@ long gk20a_sched_dev_ioctl(struct file *filp, unsigned int cmd,
 int gk20a_sched_dev_release(struct inode *inode, struct file *filp)
 {
 	struct gk20a_sched_ctrl *sched = filp->private_data;
+	struct gk20a *g = sched->g;
+	struct fifo_gk20a *f = &g->fifo;
+	struct tsg_gk20a *tsg;
+	int tsgid;
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched);
 
+	/* release any reference to TSGs */
+	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
+		if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
+			tsg = &f->tsg[tsgid];
+			kref_put(&tsg->refcount, gk20a_tsg_release);
+		}
+	}
+
 	/* unlock control */
 	mutex_lock(&sched->control_lock);
 	sched->control_locked = false;
@@ -569,11 +634,15 @@ int gk20a_sched_ctrl_init(struct gk20a *g)
 
 	sched->active_tsg_bitmap = kzalloc(sched->bitmap_size, GFP_KERNEL);
 	if (!sched->active_tsg_bitmap)
-		goto fail_active;
+		return -ENOMEM;
 
 	sched->recent_tsg_bitmap = kzalloc(sched->bitmap_size, GFP_KERNEL);
 	if (!sched->recent_tsg_bitmap)
-		goto fail_recent;
+		goto free_active;
+
+	sched->ref_tsg_bitmap = kzalloc(sched->bitmap_size, GFP_KERNEL);
+	if (!sched->ref_tsg_bitmap)
+		goto free_recent;
 
 	init_waitqueue_head(&sched->readout_wq);
 	mutex_init(&sched->status_lock);
@@ -584,10 +653,12 @@ int gk20a_sched_ctrl_init(struct gk20a *g)
 
 	return 0;
 
-fail_recent:
+free_recent:
+	kfree(sched->recent_tsg_bitmap);
+
+free_active:
 	kfree(sched->active_tsg_bitmap);
 
-fail_active:
 	return -ENOMEM;
 }
 
@@ -597,7 +668,9 @@ void gk20a_sched_ctrl_cleanup(struct gk20a *g)
 
 	kfree(sched->active_tsg_bitmap);
 	kfree(sched->recent_tsg_bitmap);
+	kfree(sched->ref_tsg_bitmap);
 	sched->active_tsg_bitmap = NULL;
 	sched->recent_tsg_bitmap = NULL;
+	sched->ref_tsg_bitmap = NULL;
 	sched->sw_ready = false;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.h b/drivers/gpu/nvgpu/gk20a/sched_gk20a.h
index 8f533056..0ae13783 100644
--- a/drivers/gpu/nvgpu/gk20a/sched_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.h
@@ -32,6 +32,7 @@ struct gk20a_sched_ctrl {
 	size_t bitmap_size;
 	u64 *active_tsg_bitmap;
 	u64 *recent_tsg_bitmap;
+	u64 *ref_tsg_bitmap;
 
 	wait_queue_head_t readout_wq;
 };
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index af8f0f7b..3e83cd06 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -28,8 +28,6 @@
 #define NVGPU_TSG_MIN_TIMESLICE_US 1000
 #define NVGPU_TSG_MAX_TIMESLICE_US 50000
 
-static void gk20a_tsg_release(struct kref *ref);
-
 bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
 {
 	return !(ch->tsgid == NVGPU_INVALID_TSG_ID);
@@ -449,7 +447,7 @@ int gk20a_tsg_dev_open(struct inode *inode, struct file *filp)
 	return ret;
 }
 
-static void gk20a_tsg_release(struct kref *ref)
+void gk20a_tsg_release(struct kref *ref)
 {
 	struct tsg_gk20a *tsg = container_of(ref, struct tsg_gk20a, refcount);
 	struct gk20a *g = tsg->g;
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
index 2819dd1c..e1960102 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
@@ -23,6 +23,7 @@ bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch);
 
 int gk20a_tsg_dev_release(struct inode *inode, struct file *filp);
 int gk20a_tsg_dev_open(struct inode *inode, struct file *filp);
+void gk20a_tsg_release(struct kref *ref);
 int gk20a_tsg_open(struct gk20a *g, struct file *filp);
 long gk20a_tsg_dev_ioctl(struct file *filp,
 	unsigned int cmd, unsigned long arg);
diff --git a/include/uapi/linux/nvgpu.h b/include/uapi/linux/nvgpu.h
index 17604d32..aa950dfa 100644
--- a/include/uapi/linux/nvgpu.h
+++ b/include/uapi/linux/nvgpu.h
@@ -1576,6 +1576,14 @@ struct nvgpu_sched_tsg_runlist_interleave_args {
 	__u32 runlist_interleave;
 };
 
+struct nvgpu_sched_api_version_args {
+	__u32 version;
+};
+
+struct nvgpu_sched_tsg_refcount_args {
+	__u32 tsgid;	/* in: TSG identifier */
+};
+
 #define NVGPU_SCHED_IOCTL_GET_TSGS \
 	_IOWR(NVGPU_SCHED_IOCTL_MAGIC, 1, \
 		struct nvgpu_sched_get_tsgs_args)
@@ -1598,9 +1606,17 @@ struct nvgpu_sched_tsg_runlist_interleave_args {
 	_IO(NVGPU_SCHED_IOCTL_MAGIC, 7)
 #define NVGPU_SCHED_IOCTL_UNLOCK_CONTROL \
 	_IO(NVGPU_SCHED_IOCTL_MAGIC, 8)
-
+#define NVGPU_SCHED_IOCTL_GET_API_VERSION \
+	_IOR(NVGPU_SCHED_IOCTL_MAGIC, 9, \
+		struct nvgpu_sched_api_version_args)
+#define NVGPU_SCHED_IOCTL_GET_TSG \
+	_IOW(NVGPU_SCHED_IOCTL_MAGIC, 10, \
+		struct nvgpu_sched_tsg_refcount_args)
+#define NVGPU_SCHED_IOCTL_PUT_TSG \
+	_IOW(NVGPU_SCHED_IOCTL_MAGIC, 11, \
+		struct nvgpu_sched_tsg_refcount_args)
 #define NVGPU_SCHED_IOCTL_LAST \
-	_IOC_NR(NVGPU_SCHED_IOCTL_UNLOCK_CONTROL)
+	_IOC_NR(NVGPU_SCHED_IOCTL_PUT_TSG)
 
 #define NVGPU_SCHED_IOCTL_MAX_ARG_SIZE \
 	sizeof(struct nvgpu_sched_tsg_get_params_args)
@@ -1620,4 +1636,6 @@ struct nvgpu_sched_event_arg {
 	__u64 status;
 };
 
+#define NVGPU_SCHED_API_VERSION	1
+
 #endif