Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/sched.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/sched.c | 657
1 file changed, 657 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/sched.c b/drivers/gpu/nvgpu/common/linux/sched.c
new file mode 100644
index 00000000..fc3f6ed8
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/sched.c
@@ -0,0 +1,657 @@
/*
 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/barrier.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <uapi/linux/nvgpu.h>

#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/bug.h>

#include "gk20a/gk20a.h"
#include "gk20a/gr_gk20a.h"
#include "sched.h"
#include "os_linux.h"
#include "ioctl_tsg.h"

#include <nvgpu/hw/gk20a/hw_ctxsw_prog_gk20a.h>
#include <nvgpu/hw/gk20a/hw_gr_gk20a.h>

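/*
 * read() on the sched control node: blocks (unless O_NONBLOCK) until a
 * scheduler event is pending, then copies a struct nvgpu_sched_event_arg
 * to userspace and clears the pending status.
 */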
ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
	size_t size, loff_t *off)
{
	struct gk20a_sched_ctrl *sched = filp->private_data;
	struct nvgpu_sched_event_arg event = { 0 };
	int err;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched,
		"filp=%p buf=%p size=%zu", filp, buf, size);

	if (size < sizeof(event))
		return -EINVAL;
	size = sizeof(event);

	nvgpu_mutex_acquire(&sched->status_lock);
	while (!sched->status) {
		nvgpu_mutex_release(&sched->status_lock);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		err = NVGPU_COND_WAIT_INTERRUPTIBLE(&sched->readout_wq,
			sched->status, 0);
		if (err)
			return err;
		nvgpu_mutex_acquire(&sched->status_lock);
	}

	event.reserved = 0;
	event.status = sched->status;

	if (copy_to_user(buf, &event, size)) {
		nvgpu_mutex_release(&sched->status_lock);
		return -EFAULT;
	}

	sched->status = 0;

	nvgpu_mutex_release(&sched->status_lock);

	return size;
}

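/*
 * poll() support: reports POLLIN | POLLRDNORM while a scheduler event is
 * pending on the control node.
 */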
unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait)
{
	struct gk20a_sched_ctrl *sched = filp->private_data;
	unsigned int mask = 0;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");

	nvgpu_mutex_acquire(&sched->status_lock);
	poll_wait(filp, &sched->readout_wq.wq, wait);
	if (sched->status)
		mask |= POLLIN | POLLRDNORM;
	nvgpu_mutex_release(&sched->status_lock);

	return mask;
}

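/*
 * NVGPU_SCHED_IOCTL_GET_TSGS: copy the bitmap of currently active TSGs to
 * userspace. If the user buffer is missing or too small, return -ENOSPC
 * with arg->size set to the required bitmap size.
 */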
static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched,
	struct nvgpu_sched_get_tsgs_args *arg)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx",
		arg->size, arg->buffer);

	if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
		arg->size = sched->bitmap_size;
		return -ENOSPC;
	}

	nvgpu_mutex_acquire(&sched->status_lock);
	if (copy_to_user((void __user *)(uintptr_t)arg->buffer,
		sched->active_tsg_bitmap, sched->bitmap_size)) {
		nvgpu_mutex_release(&sched->status_lock);
		return -EFAULT;
	}
	nvgpu_mutex_release(&sched->status_lock);

	return 0;
}

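/*
 * NVGPU_SCHED_IOCTL_GET_RECENT_TSGS: like GET_TSGS, but reports TSGs added
 * since the previous call and clears the recent bitmap on success.
 */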
static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched,
	struct nvgpu_sched_get_tsgs_args *arg)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx",
		arg->size, arg->buffer);

	if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
		arg->size = sched->bitmap_size;
		return -ENOSPC;
	}

	nvgpu_mutex_acquire(&sched->status_lock);
	if (copy_to_user((void __user *)(uintptr_t)arg->buffer,
		sched->recent_tsg_bitmap, sched->bitmap_size)) {
		nvgpu_mutex_release(&sched->status_lock);
		return -EFAULT;
	}

	memset(sched->recent_tsg_bitmap, 0, sched->bitmap_size);
	nvgpu_mutex_release(&sched->status_lock);

	return 0;
}

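/*
 * NVGPU_SCHED_IOCTL_GET_TSGS_BY_PID: build a bitmap of active TSGs owned by
 * the given user-space pid (kernel tgid) and copy it to userspace.
 */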
static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
	struct nvgpu_sched_get_tsgs_by_pid_args *arg)
{
	struct fifo_gk20a *f = &sched->g->fifo;
	struct tsg_gk20a *tsg;
	u64 *bitmap;
	unsigned int tsgid;
	/* pid at user level corresponds to kernel tgid */
	pid_t tgid = (pid_t)arg->pid;
	int err = 0;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "pid=%d size=%u buffer=%llx",
		(pid_t)arg->pid, arg->size, arg->buffer);

	if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
		arg->size = sched->bitmap_size;
		return -ENOSPC;
	}

	bitmap = nvgpu_kzalloc(sched->g, sched->bitmap_size);
	if (!bitmap)
		return -ENOMEM;

	nvgpu_mutex_acquire(&sched->status_lock);
	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
		if (NVGPU_SCHED_ISSET(tsgid, sched->active_tsg_bitmap)) {
			tsg = &f->tsg[tsgid];
			if (tsg->tgid == tgid)
				NVGPU_SCHED_SET(tsgid, bitmap);
		}
	}
	nvgpu_mutex_release(&sched->status_lock);

	if (copy_to_user((void __user *)(uintptr_t)arg->buffer,
		bitmap, sched->bitmap_size))
		err = -EFAULT;

	nvgpu_kfree(sched->g, bitmap);

	return err;
}

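/*
 * NVGPU_SCHED_IOCTL_TSG_GET_PARAMS: report owning pid, runlist interleave
 * level, timeslice and preemption modes for one TSG. Takes a temporary
 * reference on the TSG while reading its state.
 */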
static int gk20a_sched_dev_ioctl_get_params(struct gk20a_sched_ctrl *sched,
	struct nvgpu_sched_tsg_get_params_args *arg)
{
	struct gk20a *g = sched->g;
	struct fifo_gk20a *f = &g->fifo;
	struct tsg_gk20a *tsg;
	u32 tsgid = arg->tsgid;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);

	if (tsgid >= f->num_channels)
		return -EINVAL;

	tsg = &f->tsg[tsgid];
	if (!nvgpu_ref_get_unless_zero(&tsg->refcount))
		return -ENXIO;

	arg->pid = tsg->tgid;	/* kernel tgid corresponds to user pid */
	arg->runlist_interleave = tsg->interleave_level;
	arg->timeslice = tsg->timeslice_us;

	if (tsg->tsg_gr_ctx) {
		arg->graphics_preempt_mode =
			tsg->tsg_gr_ctx->graphics_preempt_mode;
		arg->compute_preempt_mode =
			tsg->tsg_gr_ctx->compute_preempt_mode;
	} else {
		arg->graphics_preempt_mode = 0;
		arg->compute_preempt_mode = 0;
	}

	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);

	return 0;
}

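/*
 * NVGPU_SCHED_IOCTL_TSG_SET_TIMESLICE: set the timeslice of one TSG. The GPU
 * is powered up (gk20a_busy) for the duration of the update.
 */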
static int gk20a_sched_dev_ioctl_tsg_set_timeslice(
	struct gk20a_sched_ctrl *sched,
	struct nvgpu_sched_tsg_timeslice_args *arg)
{
	struct gk20a *g = sched->g;
	struct fifo_gk20a *f = &g->fifo;
	struct tsg_gk20a *tsg;
	u32 tsgid = arg->tsgid;
	int err;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);

	if (tsgid >= f->num_channels)
		return -EINVAL;

	tsg = &f->tsg[tsgid];
	if (!nvgpu_ref_get_unless_zero(&tsg->refcount))
		return -ENXIO;

	err = gk20a_busy(g);
	if (err)
		goto done;

	err = gk20a_tsg_set_timeslice(tsg, arg->timeslice);

	gk20a_idle(g);

done:
	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);

	return err;
}

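/*
 * NVGPU_SCHED_IOCTL_TSG_SET_RUNLIST_INTERLEAVE: set the runlist interleave
 * level of one TSG, with the GPU powered up for the update.
 */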
static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave(
	struct gk20a_sched_ctrl *sched,
	struct nvgpu_sched_tsg_runlist_interleave_args *arg)
{
	struct gk20a *g = sched->g;
	struct fifo_gk20a *f = &g->fifo;
	struct tsg_gk20a *tsg;
	u32 tsgid = arg->tsgid;
	int err;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);

	if (tsgid >= f->num_channels)
		return -EINVAL;

	tsg = &f->tsg[tsgid];
	if (!nvgpu_ref_get_unless_zero(&tsg->refcount))
		return -ENXIO;

	err = gk20a_busy(g);
	if (err)
		goto done;

	err = gk20a_tsg_set_runlist_interleave(tsg, arg->runlist_interleave);

	gk20a_idle(g);

done:
	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);

	return err;
}

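/*
 * NVGPU_SCHED_IOCTL_LOCK_CONTROL / UNLOCK_CONTROL: set or clear the
 * control_locked flag that marks scheduler parameters as being under
 * exclusive userspace control.
 */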
static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");

	nvgpu_mutex_acquire(&sched->control_lock);
	sched->control_locked = true;
	nvgpu_mutex_release(&sched->control_lock);
	return 0;
}

static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");

	nvgpu_mutex_acquire(&sched->control_lock);
	sched->control_locked = false;
	nvgpu_mutex_release(&sched->control_lock);
	return 0;
}

static int gk20a_sched_dev_ioctl_get_api_version(struct gk20a_sched_ctrl *sched,
	struct nvgpu_sched_api_version_args *args)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");

	args->version = NVGPU_SCHED_API_VERSION;
	return 0;
}

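/*
 * NVGPU_SCHED_IOCTL_GET_TSG: take a reference on a TSG on behalf of
 * userspace and track it in ref_tsg_bitmap; fails if the TSG is already
 * referenced through this ioctl.
 */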
static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched,
	struct nvgpu_sched_tsg_refcount_args *arg)
{
	struct gk20a *g = sched->g;
	struct fifo_gk20a *f = &g->fifo;
	struct tsg_gk20a *tsg;
	u32 tsgid = arg->tsgid;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);

	if (tsgid >= f->num_channels)
		return -EINVAL;

	tsg = &f->tsg[tsgid];
	if (!nvgpu_ref_get_unless_zero(&tsg->refcount))
		return -ENXIO;

	nvgpu_mutex_acquire(&sched->status_lock);
	if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
		nvgpu_warn(g, "tsgid=%d already referenced", tsgid);
		/* unlock status_lock as nvgpu_ioctl_tsg_release locks it */
		nvgpu_mutex_release(&sched->status_lock);
		nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);
		return -ENXIO;
	}

	/* keep reference on TSG, will be released on
	 * NVGPU_SCHED_IOCTL_PUT_TSG ioctl, or close
	 */
	NVGPU_SCHED_SET(tsgid, sched->ref_tsg_bitmap);
	nvgpu_mutex_release(&sched->status_lock);

	return 0;
}

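/*
 * NVGPU_SCHED_IOCTL_PUT_TSG: drop a reference previously taken with
 * NVGPU_SCHED_IOCTL_GET_TSG and clear it from ref_tsg_bitmap.
 */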
static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched,
	struct nvgpu_sched_tsg_refcount_args *arg)
{
	struct gk20a *g = sched->g;
	struct fifo_gk20a *f = &g->fifo;
	struct tsg_gk20a *tsg;
	u32 tsgid = arg->tsgid;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);

	if (tsgid >= f->num_channels)
		return -EINVAL;

	nvgpu_mutex_acquire(&sched->status_lock);
	if (!NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
		nvgpu_mutex_release(&sched->status_lock);
		nvgpu_warn(g, "tsgid=%d not previously referenced", tsgid);
		return -ENXIO;
	}
	NVGPU_SCHED_CLR(tsgid, sched->ref_tsg_bitmap);
	nvgpu_mutex_release(&sched->status_lock);

	tsg = &f->tsg[tsgid];
	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);

	return 0;
}

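/*
 * open() on the sched control node: only one client may hold the node open
 * at a time (busy_lock tryacquire). Snapshots the active TSG bitmap into
 * recent_tsg_bitmap and clears the per-client reference bitmap.
 */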
int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
{
	struct nvgpu_os_linux *l = container_of(inode->i_cdev,
		struct nvgpu_os_linux, sched.cdev);
	struct gk20a *g;
	struct gk20a_sched_ctrl *sched;
	int err = 0;

	g = gk20a_get(&l->g);
	if (!g)
		return -ENODEV;
	sched = &l->sched_ctrl;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p", g);

	if (!sched->sw_ready) {
		err = gk20a_busy(g);
		if (err)
			goto free_ref;

		gk20a_idle(g);
	}

	if (!nvgpu_mutex_tryacquire(&sched->busy_lock)) {
		err = -EBUSY;
		goto free_ref;
	}

	memcpy(sched->recent_tsg_bitmap, sched->active_tsg_bitmap,
		sched->bitmap_size);
	memset(sched->ref_tsg_bitmap, 0, sched->bitmap_size);

	filp->private_data = sched;
	gk20a_dbg(gpu_dbg_sched, "filp=%p sched=%p", filp, sched);

free_ref:
	if (err)
		gk20a_put(g);
	return err;
}

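/*
 * ioctl() dispatcher for the sched control node: validates the command,
 * copies the argument into a stack buffer, calls the per-command handler,
 * and copies results back to userspace (also on -ENOSPC, so that callers
 * probing with a NULL/zero-sized buffer still get the required size).
 */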
long gk20a_sched_dev_ioctl(struct file *filp, unsigned int cmd,
	unsigned long arg)
{
	struct gk20a_sched_ctrl *sched = filp->private_data;
	struct gk20a *g = sched->g;
	u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE];
	int err = 0;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "nr=%d", _IOC_NR(cmd));

	if ((_IOC_TYPE(cmd) != NVGPU_SCHED_IOCTL_MAGIC) ||
		(_IOC_NR(cmd) == 0) ||
		(_IOC_NR(cmd) > NVGPU_SCHED_IOCTL_LAST) ||
		(_IOC_SIZE(cmd) > NVGPU_SCHED_IOCTL_MAX_ARG_SIZE))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	switch (cmd) {
	case NVGPU_SCHED_IOCTL_GET_TSGS:
		err = gk20a_sched_dev_ioctl_get_tsgs(sched,
			(struct nvgpu_sched_get_tsgs_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_GET_RECENT_TSGS:
		err = gk20a_sched_dev_ioctl_get_recent_tsgs(sched,
			(struct nvgpu_sched_get_tsgs_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_GET_TSGS_BY_PID:
		err = gk20a_sched_dev_ioctl_get_tsgs_by_pid(sched,
			(struct nvgpu_sched_get_tsgs_by_pid_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_TSG_GET_PARAMS:
		err = gk20a_sched_dev_ioctl_get_params(sched,
			(struct nvgpu_sched_tsg_get_params_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_TSG_SET_TIMESLICE:
		err = gk20a_sched_dev_ioctl_tsg_set_timeslice(sched,
			(struct nvgpu_sched_tsg_timeslice_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_TSG_SET_RUNLIST_INTERLEAVE:
		err = gk20a_sched_dev_ioctl_tsg_set_runlist_interleave(sched,
			(struct nvgpu_sched_tsg_runlist_interleave_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_LOCK_CONTROL:
		err = gk20a_sched_dev_ioctl_lock_control(sched);
		break;
	case NVGPU_SCHED_IOCTL_UNLOCK_CONTROL:
		err = gk20a_sched_dev_ioctl_unlock_control(sched);
		break;
	case NVGPU_SCHED_IOCTL_GET_API_VERSION:
		err = gk20a_sched_dev_ioctl_get_api_version(sched,
			(struct nvgpu_sched_api_version_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_GET_TSG:
		err = gk20a_sched_dev_ioctl_get_tsg(sched,
			(struct nvgpu_sched_tsg_refcount_args *)buf);
		break;
	case NVGPU_SCHED_IOCTL_PUT_TSG:
		err = gk20a_sched_dev_ioctl_put_tsg(sched,
			(struct nvgpu_sched_tsg_refcount_args *)buf);
		break;
	default:
		nvgpu_log_info(g, "unrecognized gpu ioctl cmd: 0x%x", cmd);
		err = -ENOTTY;
	}

	/* Some ioctls like NVGPU_SCHED_IOCTL_GET_TSGS might be called on
	 * purpose with NULL buffer and/or zero size to discover TSG bitmap
	 * size. We need to update user arguments in this case too, even
	 * if we return an error.
	 */
	if ((!err || (err == -ENOSPC)) && (_IOC_DIR(cmd) & _IOC_READ)) {
		if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
			err = -EFAULT;
	}

	return err;
}

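/*
 * release() on the sched control node: drop any TSG references still held
 * via GET_TSG, clear control_locked, release the busy_lock taken at open()
 * and drop the gk20a reference.
 */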
int gk20a_sched_dev_release(struct inode *inode, struct file *filp)
{
	struct gk20a_sched_ctrl *sched = filp->private_data;
	struct gk20a *g = sched->g;
	struct fifo_gk20a *f = &g->fifo;
	struct tsg_gk20a *tsg;
	unsigned int tsgid;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched);

	/* release any reference to TSGs */
	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
		if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
			tsg = &f->tsg[tsgid];
			nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);
		}
	}

	/* unlock control */
	nvgpu_mutex_acquire(&sched->control_lock);
	sched->control_locked = false;
	nvgpu_mutex_release(&sched->control_lock);

	nvgpu_mutex_release(&sched->busy_lock);
	gk20a_put(g);
	return 0;
}

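/*
 * Called when a TSG is created: mark it in the active and recent bitmaps,
 * set NVGPU_SCHED_STATUS_TSG_OPEN and wake up any reader/poller waiting on
 * the control node.
 */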
void gk20a_sched_ctrl_tsg_added(struct gk20a *g, struct tsg_gk20a *tsg)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
	int err;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);

	if (!sched->sw_ready) {
		err = gk20a_busy(g);
		if (err) {
			WARN_ON(err);
			return;
		}

		gk20a_idle(g);
	}

	nvgpu_mutex_acquire(&sched->status_lock);
	NVGPU_SCHED_SET(tsg->tsgid, sched->active_tsg_bitmap);
	NVGPU_SCHED_SET(tsg->tsgid, sched->recent_tsg_bitmap);
	sched->status |= NVGPU_SCHED_STATUS_TSG_OPEN;
	nvgpu_mutex_release(&sched->status_lock);
	nvgpu_cond_signal_interruptible(&sched->readout_wq);
}

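/*
 * Called when a TSG is released: clear it from the active and recent
 * bitmaps. No event is raised, since the app manager is only notified when
 * TSGs are added.
 */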
void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct gk20a_sched_ctrl *sched = &l->sched_ctrl;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);

	nvgpu_mutex_acquire(&sched->status_lock);
	NVGPU_SCHED_CLR(tsg->tsgid, sched->active_tsg_bitmap);

	/* clear recent_tsg_bitmap as well: if app manager did not
	 * notice that TSG was previously added, no need to notify it
	 * if the TSG has been released in the meantime. If the
	 * TSG gets reallocated, app manager will be notified as usual.
	 */
	NVGPU_SCHED_CLR(tsg->tsgid, sched->recent_tsg_bitmap);

	/* do not set event_pending, we only want to notify app manager
	 * when TSGs are added, so that it can apply sched params
	 */
	nvgpu_mutex_release(&sched->status_lock);
}

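/*
 * One-time setup of the scheduler control state: sizes the TSG bitmaps from
 * the number of channels (rounded up to a multiple of 64 bits), allocates
 * them and initializes the wait queue and locks. Idempotent once sw_ready
 * is set.
 */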
int gk20a_sched_ctrl_init(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
	struct fifo_gk20a *f = &g->fifo;
	int err;

	if (sched->sw_ready)
		return 0;

	sched->g = g;
	sched->bitmap_size = roundup(f->num_channels, 64) / 8;
	sched->status = 0;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p sched=%p size=%zu",
		g, sched, sched->bitmap_size);

	sched->active_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size);
	if (!sched->active_tsg_bitmap)
		return -ENOMEM;

	sched->recent_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size);
	if (!sched->recent_tsg_bitmap) {
		err = -ENOMEM;
		goto free_active;
	}

	sched->ref_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size);
	if (!sched->ref_tsg_bitmap) {
		err = -ENOMEM;
		goto free_recent;
	}

	nvgpu_cond_init(&sched->readout_wq);

	err = nvgpu_mutex_init(&sched->status_lock);
	if (err)
		goto free_ref;

	err = nvgpu_mutex_init(&sched->control_lock);
	if (err)
		goto free_status_lock;

	err = nvgpu_mutex_init(&sched->busy_lock);
	if (err)
		goto free_control_lock;

	sched->sw_ready = true;

	return 0;

free_control_lock:
	nvgpu_mutex_destroy(&sched->control_lock);
free_status_lock:
	nvgpu_mutex_destroy(&sched->status_lock);
free_ref:
	nvgpu_kfree(g, sched->ref_tsg_bitmap);
free_recent:
	nvgpu_kfree(g, sched->recent_tsg_bitmap);
free_active:
	nvgpu_kfree(g, sched->active_tsg_bitmap);

	return err;
}

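/*
 * Teardown counterpart of gk20a_sched_ctrl_init(): free the TSG bitmaps,
 * destroy the locks and clear sw_ready.
 */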
void gk20a_sched_ctrl_cleanup(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct gk20a_sched_ctrl *sched = &l->sched_ctrl;

	nvgpu_kfree(g, sched->active_tsg_bitmap);
	nvgpu_kfree(g, sched->recent_tsg_bitmap);
	nvgpu_kfree(g, sched->ref_tsg_bitmap);
	sched->active_tsg_bitmap = NULL;
	sched->recent_tsg_bitmap = NULL;
	sched->ref_tsg_bitmap = NULL;

	nvgpu_mutex_destroy(&sched->status_lock);
	nvgpu_mutex_destroy(&sched->control_lock);
	nvgpu_mutex_destroy(&sched->busy_lock);

	sched->sw_ready = false;
}