Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/sched.c')
 drivers/gpu/nvgpu/common/linux/sched.c | 57 +++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 35 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/sched.c b/drivers/gpu/nvgpu/common/linux/sched.c
index a7da020c..2ad5aabf 100644
--- a/drivers/gpu/nvgpu/common/linux/sched.c
+++ b/drivers/gpu/nvgpu/common/linux/sched.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -37,10 +37,11 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
 	size_t size, loff_t *off)
 {
 	struct gk20a_sched_ctrl *sched = filp->private_data;
+	struct gk20a *g = sched->g;
 	struct nvgpu_sched_event_arg event = { 0 };
 	int err;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched,
 		"filp=%p buf=%p size=%zu", filp, buf, size);
 
 	if (size < sizeof(event))
@@ -77,9 +78,10 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
 unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait)
 {
 	struct gk20a_sched_ctrl *sched = filp->private_data;
+	struct gk20a *g = sched->g;
 	unsigned int mask = 0;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
 
 	nvgpu_mutex_acquire(&sched->status_lock);
 	poll_wait(filp, &sched->readout_wq.wq, wait);
@@ -93,7 +95,9 @@ unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait)
 static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched,
 	struct nvgpu_sched_get_tsgs_args *arg)
 {
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx",
+	struct gk20a *g = sched->g;
+
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx",
 		arg->size, arg->buffer);
 
 	if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
@@ -115,7 +119,9 @@ static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched,
 static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched,
 	struct nvgpu_sched_get_tsgs_args *arg)
 {
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx",
+	struct gk20a *g = sched->g;
+
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "size=%u buffer=%llx",
 		arg->size, arg->buffer);
 
 	if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
@@ -139,7 +145,8 @@ static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched,
 static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
 	struct nvgpu_sched_get_tsgs_by_pid_args *arg)
 {
-	struct fifo_gk20a *f = &sched->g->fifo;
+	struct gk20a *g = sched->g;
+	struct fifo_gk20a *f = &g->fifo;
 	struct tsg_gk20a *tsg;
 	u64 *bitmap;
 	unsigned int tsgid;
@@ -147,7 +154,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
 	pid_t tgid = (pid_t)arg->pid;
 	int err = 0;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "pid=%d size=%u buffer=%llx",
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "pid=%d size=%u buffer=%llx",
 		(pid_t)arg->pid, arg->size, arg->buffer);
 
 	if ((arg->size < sched->bitmap_size) || (!arg->buffer)) {
@@ -186,7 +193,7 @@ static int gk20a_sched_dev_ioctl_get_params(struct gk20a_sched_ctrl *sched,
 	struct tsg_gk20a *tsg;
 	u32 tsgid = arg->tsgid;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
 
 	if (tsgid >= f->num_channels)
 		return -EINVAL;
@@ -221,7 +228,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_timeslice(
 	u32 tsgid = arg->tsgid;
 	int err;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
 
 	if (tsgid >= f->num_channels)
 		return -EINVAL;
@@ -256,7 +263,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave(
 	u32 tsgid = arg->tsgid;
 	int err;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
 
 	if (tsgid >= f->num_channels)
 		return -EINVAL;
@@ -283,7 +290,9 @@ done:
 
 static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched)
 {
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");
+	struct gk20a *g = sched->g;
+
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
 
 	nvgpu_mutex_acquire(&sched->control_lock);
 	sched->control_locked = true;
@@ -293,7 +302,9 @@ static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched)
 
 static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched)
 {
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");
+	struct gk20a *g = sched->g;
+
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
 
 	nvgpu_mutex_acquire(&sched->control_lock);
 	sched->control_locked = false;
@@ -304,7 +315,9 @@ static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched)
 static int gk20a_sched_dev_ioctl_get_api_version(struct gk20a_sched_ctrl *sched,
 	struct nvgpu_sched_api_version_args *args)
 {
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");
+	struct gk20a *g = sched->g;
+
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
 
 	args->version = NVGPU_SCHED_API_VERSION;
 	return 0;
@@ -318,7 +331,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched,
 	struct tsg_gk20a *tsg;
 	u32 tsgid = arg->tsgid;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
 
 	if (tsgid >= f->num_channels)
 		return -EINVAL;
@@ -355,7 +368,7 @@ static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched,
 	struct tsg_gk20a *tsg;
 	u32 tsgid = arg->tsgid;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsgid);
 
 	if (tsgid >= f->num_channels)
 		return -EINVAL;
@@ -390,7 +403,7 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
 		return -ENODEV;
 	sched = &l->sched_ctrl;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p", g);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "g=%p", g);
 
 	if (!sched->sw_ready) {
 		err = gk20a_busy(g);
@@ -410,7 +423,7 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
 	memset(sched->ref_tsg_bitmap, 0, sched->bitmap_size);
 
 	filp->private_data = sched;
-	gk20a_dbg(gpu_dbg_sched, "filp=%p sched=%p", filp, sched);
+	nvgpu_log(g, gpu_dbg_sched, "filp=%p sched=%p", filp, sched);
 
 free_ref:
 	if (err)
@@ -426,7 +439,7 @@ long gk20a_sched_dev_ioctl(struct file *filp, unsigned int cmd,
 	u8 buf[NVGPU_CTXSW_IOCTL_MAX_ARG_SIZE];
 	int err = 0;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "nr=%d", _IOC_NR(cmd));
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "nr=%d", _IOC_NR(cmd));
 
 	if ((_IOC_TYPE(cmd) != NVGPU_SCHED_IOCTL_MAGIC) ||
 		(_IOC_NR(cmd) == 0) ||
@@ -509,7 +522,7 @@ int gk20a_sched_dev_release(struct inode *inode, struct file *filp)
 	struct tsg_gk20a *tsg;
 	unsigned int tsgid;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched);
 
 	/* release any reference to TSGs */
 	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
@@ -535,7 +548,7 @@ void gk20a_sched_ctrl_tsg_added(struct gk20a *g, struct tsg_gk20a *tsg)
 	struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
 	int err;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
 
 	if (!sched->sw_ready) {
 		err = gk20a_busy(g);
@@ -560,7 +573,7 @@ void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg)
 	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
 	struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
 
 	nvgpu_mutex_acquire(&sched->status_lock);
 	NVGPU_SCHED_CLR(tsg->tsgid, sched->active_tsg_bitmap);
@@ -592,7 +605,7 @@ int gk20a_sched_ctrl_init(struct gk20a *g)
 	sched->bitmap_size = roundup(f->num_channels, 64) / 8;
 	sched->status = 0;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p sched=%p size=%zu",
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "g=%p sched=%p size=%zu",
 		g, sched, sched->bitmap_size);
 
 	sched->active_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size);
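
The whole patch is one mechanical substitution: every gk20a_dbg(mask, fmt, ...) call becomes nvgpu_log(g, mask, fmt, ...), which needs a struct gk20a *g in scope. Functions that only receive a struct gk20a_sched_ctrl *sched derive it via g = sched->g; functions that already take or compute g just gain the extra argument. A minimal before/after sketch of the pattern follows; the function name gk20a_sched_dev_ioctl_example is hypothetical and the body is illustrative, not taken from the patch:

	/* Before: the legacy macro logs without a device handle. */
	static int gk20a_sched_dev_ioctl_example(struct gk20a_sched_ctrl *sched)
	{
		gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");
		return 0;
	}

	/* After: nvgpu_log() takes the gk20a device as its first argument,
	 * so callers that only hold the sched control block pull the
	 * device pointer out of it first. */
	static int gk20a_sched_dev_ioctl_example(struct gk20a_sched_ctrl *sched)
	{
		struct gk20a *g = sched->g;	/* device handle for per-GPU logging */

		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, " ");
		return 0;
	}

Threading g through each call site is what lets the log infrastructure attribute messages to a specific GPU instance rather than a single global debug state.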