Diffstat (limited to 'include/os/linux/sched.c')
-rw-r--r--  include/os/linux/sched.c  | 666
1 file changed, 0 insertions(+), 666 deletions(-)
diff --git a/include/os/linux/sched.c b/include/os/linux/sched.c
deleted file mode 100644
index 30c58a1..0000000
--- a/include/os/linux/sched.c
+++ /dev/null
@@ -1,666 +0,0 @@
/*
 * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/barrier.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <uapi/linux/nvgpu.h>

#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/bug.h>
#include <nvgpu/barrier.h>
#include <nvgpu/gk20a.h>

#include "gk20a/gr_gk20a.h"
#include "sched.h"
#include "os_linux.h"
#include "ioctl_tsg.h"

#include <nvgpu/hw/gk20a/hw_ctxsw_prog_gk20a.h>
#include <nvgpu/hw/gk20a/hw_gr_gk20a.h>

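/*
 * Blocking read of the scheduler event status word. Sleeps on readout_wq
 * (or returns -EAGAIN for O_NONBLOCK files) until sched->status becomes
 * non-zero, then copies a struct nvgpu_sched_event_arg to userspace and
 * clears the status under status_lock.
 */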
ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
	size_t size, loff_t *off)
{
	struct gk20a *g = filp->private_data;
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;
	struct nvgpu_sched_event_arg event = { 0 };
	int err;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched,
		"filp=%p buf=%p size=%zu", filp, buf, size);

	if (size < sizeof(event))
		return -EINVAL;
	size = sizeof(event);

	nvgpu_mutex_acquire(&sched->status_lock);
	while (!sched->status) {
		nvgpu_mutex_release(&sched->status_lock);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		err = NVGPU_COND_WAIT_INTERRUPTIBLE(&sched->readout_wq,
			sched->status, 0);
		if (err)
			return err;
		nvgpu_mutex_acquire(&sched->status_lock);
	}

	event.reserved = 0;
	event.status = sched->status;

	if (copy_to_user(buf, &event, size)) {
		nvgpu_mutex_release(&sched->status_lock);
		return -EFAULT;
	}

	sched->status = 0;

	nvgpu_mutex_release(&sched->status_lock);

	return size;
}

unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait)
{
	struct gk20a *g = filp->private_data;
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;
	unsigned int mask = 0;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");

	nvgpu_mutex_acquire(&sched->status_lock);
	poll_wait(filp, &sched->readout_wq.wq, wait);
	if (sched->status)
		mask |= POLLIN | POLLRDNORM;
	nvgpu_mutex_release(&sched->status_lock);

	return mask;
}

static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a *g,
	struct nvgpu_sched_get_tsgs_args *arg)
{
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx",
		arg->size, arg->buffer);

	if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
		arg->size = sched->bitmap_size;
		return -ENOSPC;
	}

	nvgpu_mutex_acquire(&sched->status_lock);
	if (copy_to_user((void __user *)(uintptr_t)arg->buffer,
		sched->active_tsg_bitmap, sched->bitmap_size)) {
		nvgpu_mutex_release(&sched->status_lock);
		return -EFAULT;
	}
	nvgpu_mutex_release(&sched->status_lock);

	return 0;
}

static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a *g,
	struct nvgpu_sched_get_tsgs_args *arg)
{
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx",
		arg->size, arg->buffer);

	if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
		arg->size = sched->bitmap_size;
		return -ENOSPC;
	}

	nvgpu_mutex_acquire(&sched->status_lock);
	if (copy_to_user((void __user *)(uintptr_t)arg->buffer,
		sched->recent_tsg_bitmap, sched->bitmap_size)) {
		nvgpu_mutex_release(&sched->status_lock);
		return -EFAULT;
	}

	memset(sched->recent_tsg_bitmap, 0, sched->bitmap_size);
	nvgpu_mutex_release(&sched->status_lock);

	return 0;
}

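/*
 * Builds a scratch bitmap of the active TSGs whose owning thread group
 * matches the given user-space pid (kernel tgid); the copy_to_user() of
 * that bitmap then happens outside the status_lock critical section.
 */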
static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a *g,
	struct nvgpu_sched_get_tsgs_by_pid_args *arg)
{
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;
	struct fifo_gk20a *f = &g->fifo;
	struct tsg_gk20a *tsg;
	u64 *bitmap;
	unsigned int tsgid;
	/* pid at user level corresponds to kernel tgid */
	pid_t tgid = (pid_t)arg->pid;
	int err = 0;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "pid=%d size=%u buffer=%llx",
		(pid_t)arg->pid, arg->size, arg->buffer);

	if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
		arg->size = sched->bitmap_size;
		return -ENOSPC;
	}

	bitmap = nvgpu_kzalloc(g, sched->bitmap_size);
	if (!bitmap)
		return -ENOMEM;

	nvgpu_mutex_acquire(&sched->status_lock);
	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
		if (NVGPU_SCHED_ISSET(tsgid, sched->active_tsg_bitmap)) {
			tsg = &f->tsg[tsgid];
			if (tsg->tgid == tgid)
				NVGPU_SCHED_SET(tsgid, bitmap);
		}
	}
	nvgpu_mutex_release(&sched->status_lock);

	if (copy_to_user((void __user *)(uintptr_t)arg->buffer,
		bitmap, sched->bitmap_size))
		err = -EFAULT;

	nvgpu_kfree(g, bitmap);

	return err;
}

static int gk20a_sched_dev_ioctl_get_params(struct gk20a *g,
	struct nvgpu_sched_tsg_get_params_args *arg)
{
	struct fifo_gk20a *f = &g->fifo;
	struct tsg_gk20a *tsg;
	u32 tsgid = arg->tsgid;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);

	if (tsgid >= f->num_channels)
		return -EINVAL;

	nvgpu_speculation_barrier();

	tsg = &f->tsg[tsgid];
	if (!nvgpu_ref_get_unless_zero(&tsg->refcount))
		return -ENXIO;

	arg->pid = tsg->tgid;	/* kernel tgid corresponds to user pid */
	arg->runlist_interleave = tsg->interleave_level;
	arg->timeslice = gk20a_tsg_get_timeslice(tsg);

	arg->graphics_preempt_mode =
		tsg->gr_ctx.graphics_preempt_mode;
	arg->compute_preempt_mode =
		tsg->gr_ctx.compute_preempt_mode;

	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);

	return 0;
}

static int gk20a_sched_dev_ioctl_tsg_set_timeslice(
	struct gk20a *g,
	struct nvgpu_sched_tsg_timeslice_args *arg)
{
	struct fifo_gk20a *f = &g->fifo;
	struct tsg_gk20a *tsg;
	u32 tsgid = arg->tsgid;
	int err;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);

	if (tsgid >= f->num_channels)
		return -EINVAL;

	nvgpu_speculation_barrier();

	tsg = &f->tsg[tsgid];
	if (!nvgpu_ref_get_unless_zero(&tsg->refcount))
		return -ENXIO;

	err = gk20a_busy(g);
	if (err)
		goto done;

	err = gk20a_tsg_set_timeslice(tsg, arg->timeslice);

	gk20a_idle(g);

done:
	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);

	return err;
}

static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave(
	struct gk20a *g,
	struct nvgpu_sched_tsg_runlist_interleave_args *arg)
{
	struct fifo_gk20a *f = &g->fifo;
	struct tsg_gk20a *tsg;
	u32 tsgid = arg->tsgid;
	int err;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);

	if (tsgid >= f->num_channels)
		return -EINVAL;

	nvgpu_speculation_barrier();

	tsg = &f->tsg[tsgid];
	if (!nvgpu_ref_get_unless_zero(&tsg->refcount))
		return -ENXIO;

	err = gk20a_busy(g);
	if (err)
		goto done;

	err = gk20a_tsg_set_runlist_interleave(tsg, arg->runlist_interleave);

	gk20a_idle(g);

done:
	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);

	return err;
}

static int gk20a_sched_dev_ioctl_lock_control(struct gk20a *g)
{
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");

	nvgpu_mutex_acquire(&sched->control_lock);
	sched->control_locked = true;
	nvgpu_mutex_release(&sched->control_lock);
	return 0;
}

static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a *g)
{
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");

	nvgpu_mutex_acquire(&sched->control_lock);
	sched->control_locked = false;
	nvgpu_mutex_release(&sched->control_lock);
	return 0;
}

static int gk20a_sched_dev_ioctl_get_api_version(struct gk20a *g,
	struct nvgpu_sched_api_version_args *args)
{
	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");

	args->version = NVGPU_SCHED_API_VERSION;
	return 0;
}

static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a *g,
	struct nvgpu_sched_tsg_refcount_args *arg)
{
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;
	struct fifo_gk20a *f = &g->fifo;
	struct tsg_gk20a *tsg;
	u32 tsgid = arg->tsgid;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);

	if (tsgid >= f->num_channels)
		return -EINVAL;

	nvgpu_speculation_barrier();

	tsg = &f->tsg[tsgid];
	if (!nvgpu_ref_get_unless_zero(&tsg->refcount))
		return -ENXIO;

	nvgpu_mutex_acquire(&sched->status_lock);
	if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
		nvgpu_warn(g, "tsgid=%d already referenced", tsgid);
		/* unlock status_lock as nvgpu_ioctl_tsg_release locks it */
		nvgpu_mutex_release(&sched->status_lock);
		nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);
		return -ENXIO;
	}

	/* keep reference on TSG, will be released on
	 * NVGPU_SCHED_IOCTL_PUT_TSG ioctl, or close
	 */
	NVGPU_SCHED_SET(tsgid, sched->ref_tsg_bitmap);
	nvgpu_mutex_release(&sched->status_lock);

	return 0;
}

static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a *g,
	struct nvgpu_sched_tsg_refcount_args *arg)
{
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;
	struct fifo_gk20a *f = &g->fifo;
	struct tsg_gk20a *tsg;
	u32 tsgid = arg->tsgid;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);

	if (tsgid >= f->num_channels)
		return -EINVAL;

	nvgpu_speculation_barrier();

	nvgpu_mutex_acquire(&sched->status_lock);
	if (!NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
		nvgpu_mutex_release(&sched->status_lock);
		nvgpu_warn(g, "tsgid=%d not previously referenced", tsgid);
		return -ENXIO;
	}
	NVGPU_SCHED_CLR(tsgid, sched->ref_tsg_bitmap);
	nvgpu_mutex_release(&sched->status_lock);

	tsg = &f->tsg[tsgid];
	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);

	return 0;
}

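/*
 * Opening the scheduler node is exclusive: busy_lock is taken with a
 * trylock here and only released in gk20a_sched_dev_release(), so a single
 * control client can be active at a time. The recent bitmap is seeded from
 * the currently active TSGs and the per-client reference bitmap is cleared.
 */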
int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
{
	struct nvgpu_os_linux *l = container_of(inode->i_cdev,
		struct nvgpu_os_linux, sched.cdev);
	struct gk20a *g;
	struct nvgpu_sched_ctrl *sched;
	int err = 0;

	g = gk20a_get(&l->g);
	if (!g)
		return -ENODEV;
	sched = &g->sched_ctrl;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "g=%p", g);

	if (!sched->sw_ready) {
		err = gk20a_busy(g);
		if (err)
			goto free_ref;

		gk20a_idle(g);
	}

	if (!nvgpu_mutex_tryacquire(&sched->busy_lock)) {
		err = -EBUSY;
		goto free_ref;
	}

	memcpy(sched->recent_tsg_bitmap, sched->active_tsg_bitmap,
		sched->bitmap_size);
	memset(sched->ref_tsg_bitmap, 0, sched->bitmap_size);

	filp->private_data = g;
	nvgpu_log(g, gpu_dbg_sched, "filp=%p sched=%p", filp, sched);

free_ref:
	if (err)
		gk20a_put(g);
	return err;
}

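/*
 * Central ioctl dispatcher: validates the command number and argument size,
 * copies the argument block into a local buffer for _IOC_WRITE commands,
 * dispatches to the handler, then copies the (possibly updated) buffer back
 * to userspace for _IOC_READ commands -- including the -ENOSPC case used to
 * report the required bitmap size.
 */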
long gk20a_sched_dev_ioctl(struct file *filp, unsigned int cmd,
	unsigned long arg)
{
	struct gk20a *g = filp->private_data;
	u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE];
	int err = 0;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "nr=%d", _IOC_NR(cmd));

	if ((_IOC_TYPE(cmd) != NVGPU_SCHED_IOCTL_MAGIC) ||
		(_IOC_NR(cmd) == 0) ||
		(_IOC_NR(cmd) > NVGPU_SCHED_IOCTL_LAST) ||
		(_IOC_SIZE(cmd) > NVGPU_SCHED_IOCTL_MAX_ARG_SIZE))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	nvgpu_speculation_barrier();
	switch (cmd) {
	case NVGPU_SCHED_IOCTL_GET_TSGS:
		err = gk20a_sched_dev_ioctl_get_tsgs(g,
			(struct nvgpu_sched_get_tsgs_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_GET_RECENT_TSGS:
		err = gk20a_sched_dev_ioctl_get_recent_tsgs(g,
			(struct nvgpu_sched_get_tsgs_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_GET_TSGS_BY_PID:
		err = gk20a_sched_dev_ioctl_get_tsgs_by_pid(g,
			(struct nvgpu_sched_get_tsgs_by_pid_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_TSG_GET_PARAMS:
		err = gk20a_sched_dev_ioctl_get_params(g,
			(struct nvgpu_sched_tsg_get_params_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_TSG_SET_TIMESLICE:
		err = gk20a_sched_dev_ioctl_tsg_set_timeslice(g,
			(struct nvgpu_sched_tsg_timeslice_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_TSG_SET_RUNLIST_INTERLEAVE:
		err = gk20a_sched_dev_ioctl_tsg_set_runlist_interleave(g,
			(struct nvgpu_sched_tsg_runlist_interleave_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_LOCK_CONTROL:
		err = gk20a_sched_dev_ioctl_lock_control(g);
		break;
	case NVGPU_SCHED_IOCTL_UNLOCK_CONTROL:
		err = gk20a_sched_dev_ioctl_unlock_control(g);
		break;
	case NVGPU_SCHED_IOCTL_GET_API_VERSION:
		err = gk20a_sched_dev_ioctl_get_api_version(g,
			(struct nvgpu_sched_api_version_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_GET_TSG:
		err = gk20a_sched_dev_ioctl_get_tsg(g,
			(struct nvgpu_sched_tsg_refcount_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_PUT_TSG:
		err = gk20a_sched_dev_ioctl_put_tsg(g,
			(struct nvgpu_sched_tsg_refcount_args *)buf);
		break;
	default:
		nvgpu_log_info(g, "unrecognized gpu ioctl cmd: 0x%x", cmd);
		err = -ENOTTY;
	}

	/* Some ioctls like NVGPU_SCHED_IOCTL_GET_TSGS might be called on
	 * purpose with NULL buffer and/or zero size to discover TSG bitmap
	 * size. We need to update user arguments in this case too, even
	 * if we return an error.
	 */
	if ((!err || (err == -ENOSPC)) && (_IOC_DIR(cmd) & _IOC_READ)) {
		if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
			err = -EFAULT;
	}

	return err;
}

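/*
 * Drops any TSG references still tracked in ref_tsg_bitmap, re-enables
 * scheduler control, and releases the exclusive busy_lock taken at open
 * time.
 */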
int gk20a_sched_dev_release(struct inode *inode, struct file *filp)
{
	struct gk20a *g = filp->private_data;
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;
	struct fifo_gk20a *f = &g->fifo;
	struct tsg_gk20a *tsg;
	unsigned int tsgid;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched);

	/* release any reference to TSGs */
	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
		if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
			tsg = &f->tsg[tsgid];
			nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);
		}
	}

	/* unlock control */
	nvgpu_mutex_acquire(&sched->control_lock);
	sched->control_locked = false;
	nvgpu_mutex_release(&sched->control_lock);

	nvgpu_mutex_release(&sched->busy_lock);
	gk20a_put(g);
	return 0;
}

void gk20a_sched_ctrl_tsg_added(struct gk20a *g, struct tsg_gk20a *tsg)
{
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;
	int err;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);

	if (!sched->sw_ready) {
		err = gk20a_busy(g);
		if (err) {
			WARN_ON(err);
			return;
		}

		gk20a_idle(g);
	}

	nvgpu_mutex_acquire(&sched->status_lock);
	NVGPU_SCHED_SET(tsg->tsgid, sched->active_tsg_bitmap);
	NVGPU_SCHED_SET(tsg->tsgid, sched->recent_tsg_bitmap);
	sched->status |= NVGPU_SCHED_STATUS_TSG_OPEN;
	nvgpu_mutex_release(&sched->status_lock);
	nvgpu_cond_signal_interruptible(&sched->readout_wq);
}

void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg)
{
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);

	nvgpu_mutex_acquire(&sched->status_lock);
	NVGPU_SCHED_CLR(tsg->tsgid, sched->active_tsg_bitmap);

	/* clear recent_tsg_bitmap as well: if app manager did not
	 * notice that TSG was previously added, no need to notify it
	 * if the TSG has been released in the meantime. If the
	 * TSG gets reallocated, app manager will be notified as usual.
	 */
	NVGPU_SCHED_CLR(tsg->tsgid, sched->recent_tsg_bitmap);

	/* do not set event_pending, we only want to notify app manager
	 * when TSGs are added, so that it can apply sched params
	 */
	nvgpu_mutex_release(&sched->status_lock);
}

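/*
 * The three bitmaps (active, recent, referenced) hold one bit per TSG id,
 * bounded by the number of channels and rounded up to whole 64-bit words:
 * e.g. 512 channels -> 512 bits -> 64 bytes per bitmap.
 */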
int gk20a_sched_ctrl_init(struct gk20a *g)
{
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;
	struct fifo_gk20a *f = &g->fifo;
	int err;

	if (sched->sw_ready)
		return 0;

	sched->bitmap_size = roundup(f->num_channels, 64) / 8;
	sched->status = 0;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "g=%p sched=%p size=%zu",
		g, sched, sched->bitmap_size);

	sched->active_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size);
	if (!sched->active_tsg_bitmap)
		return -ENOMEM;

	sched->recent_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size);
	if (!sched->recent_tsg_bitmap) {
		err = -ENOMEM;
		goto free_active;
	}

	sched->ref_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size);
	if (!sched->ref_tsg_bitmap) {
		err = -ENOMEM;
		goto free_recent;
	}

	nvgpu_cond_init(&sched->readout_wq);

	err = nvgpu_mutex_init(&sched->status_lock);
	if (err)
		goto free_ref;

	err = nvgpu_mutex_init(&sched->control_lock);
	if (err)
		goto free_status_lock;

	err = nvgpu_mutex_init(&sched->busy_lock);
	if (err)
		goto free_control_lock;

	sched->sw_ready = true;

	return 0;

free_control_lock:
	nvgpu_mutex_destroy(&sched->control_lock);
free_status_lock:
	nvgpu_mutex_destroy(&sched->status_lock);
free_ref:
	nvgpu_kfree(g, sched->ref_tsg_bitmap);
free_recent:
	nvgpu_kfree(g, sched->recent_tsg_bitmap);
free_active:
	nvgpu_kfree(g, sched->active_tsg_bitmap);

	return err;
}

void gk20a_sched_ctrl_cleanup(struct gk20a *g)
{
	struct nvgpu_sched_ctrl *sched = &g->sched_ctrl;

	nvgpu_kfree(g, sched->active_tsg_bitmap);
	nvgpu_kfree(g, sched->recent_tsg_bitmap);
	nvgpu_kfree(g, sched->ref_tsg_bitmap);
	sched->active_tsg_bitmap = NULL;
	sched->recent_tsg_bitmap = NULL;
	sched->ref_tsg_bitmap = NULL;

	nvgpu_mutex_destroy(&sched->status_lock);
	nvgpu_mutex_destroy(&sched->control_lock);
	nvgpu_mutex_destroy(&sched->busy_lock);

	sched->sw_ready = false;
}