Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/sched_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/sched_gk20a.c	80
1 file changed, 40 insertions, 40 deletions
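
This change converts sched_gk20a.c from the raw Linux mutex primitives (mutex_init, mutex_lock, mutex_unlock, mutex_trylock) to the nvgpu locking wrappers (nvgpu_mutex_init, nvgpu_mutex_acquire, nvgpu_mutex_release, nvgpu_mutex_tryacquire). The substitution is mechanical and one-to-one; no locking behavior changes. As a minimal sketch of what the wrappers are assumed to do on a Linux build (the real definitions live in the nvgpu OS-abstraction headers, not in this file):

#include <linux/mutex.h>

/* Illustrative sketch only: assumes the Linux implementation of the
 * nvgpu lock wrappers is a thin layer over struct mutex.
 */
struct nvgpu_mutex {
	struct mutex mutex;	/* Linux backing lock */
};

static inline void nvgpu_mutex_init(struct nvgpu_mutex *mutex)
{
	mutex_init(&mutex->mutex);
}

static inline void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex)
{
	mutex_lock(&mutex->mutex);
}

static inline void nvgpu_mutex_release(struct nvgpu_mutex *mutex)
{
	mutex_unlock(&mutex->mutex);
}

/* Like mutex_trylock(): returns nonzero if the lock was taken, 0 if not. */
static inline int nvgpu_mutex_tryacquire(struct nvgpu_mutex *mutex)
{
	return mutex_trylock(&mutex->mutex);
}

Under that mapping, nvgpu_mutex_tryacquire() keeps mutex_trylock() semantics, which is why gk20a_sched_dev_open() below can return -EBUSY when the acquire fails.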
diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
index 20cd1232..6fdc2774 100644
--- a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
@@ -46,29 +46,29 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
 		return -EINVAL;
 	size = sizeof(event);
 
-	mutex_lock(&sched->status_lock);
+	nvgpu_mutex_acquire(&sched->status_lock);
 	while (!sched->status) {
-		mutex_unlock(&sched->status_lock);
+		nvgpu_mutex_release(&sched->status_lock);
 		if (filp->f_flags & O_NONBLOCK)
 			return -EAGAIN;
 		err = wait_event_interruptible(sched->readout_wq,
 			sched->status);
 		if (err)
 			return err;
-		mutex_lock(&sched->status_lock);
+		nvgpu_mutex_acquire(&sched->status_lock);
 	}
 
 	event.reserved = 0;
 	event.status = sched->status;
 
 	if (copy_to_user(buf, &event, size)) {
-		mutex_unlock(&sched->status_lock);
+		nvgpu_mutex_release(&sched->status_lock);
 		return -EFAULT;
 	}
 
 	sched->status = 0;
 
-	mutex_unlock(&sched->status_lock);
+	nvgpu_mutex_release(&sched->status_lock);
 
 	return size;
 }
@@ -80,11 +80,11 @@ unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait)
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");
 
-	mutex_lock(&sched->status_lock);
+	nvgpu_mutex_acquire(&sched->status_lock);
 	poll_wait(filp, &sched->readout_wq, wait);
 	if (sched->status)
 		mask |= POLLIN | POLLRDNORM;
-	mutex_unlock(&sched->status_lock);
+	nvgpu_mutex_release(&sched->status_lock);
 
 	return mask;
 }
@@ -100,13 +100,13 @@ static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched,
 		return -ENOSPC;
 	}
 
-	mutex_lock(&sched->status_lock);
+	nvgpu_mutex_acquire(&sched->status_lock);
 	if (copy_to_user((void __user *)(uintptr_t)arg->buffer,
 		sched->active_tsg_bitmap, sched->bitmap_size)) {
-		mutex_unlock(&sched->status_lock);
+		nvgpu_mutex_release(&sched->status_lock);
 		return -EFAULT;
 	}
-	mutex_unlock(&sched->status_lock);
+	nvgpu_mutex_release(&sched->status_lock);
 
 	return 0;
 }
@@ -122,15 +122,15 @@ static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched,
 		return -ENOSPC;
 	}
 
-	mutex_lock(&sched->status_lock);
+	nvgpu_mutex_acquire(&sched->status_lock);
 	if (copy_to_user((void __user *)(uintptr_t)arg->buffer,
 		sched->recent_tsg_bitmap, sched->bitmap_size)) {
-		mutex_unlock(&sched->status_lock);
+		nvgpu_mutex_release(&sched->status_lock);
 		return -EFAULT;
 	}
 
 	memset(sched->recent_tsg_bitmap, 0, sched->bitmap_size);
-	mutex_unlock(&sched->status_lock);
+	nvgpu_mutex_release(&sched->status_lock);
 
 	return 0;
 }
@@ -158,7 +158,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
 	if (!bitmap)
 		return -ENOMEM;
 
-	mutex_lock(&sched->status_lock);
+	nvgpu_mutex_acquire(&sched->status_lock);
 	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
 		if (NVGPU_SCHED_ISSET(tsgid, sched->active_tsg_bitmap)) {
 			tsg = &f->tsg[tsgid];
@@ -166,7 +166,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
 				NVGPU_SCHED_SET(tsgid, bitmap);
 		}
 	}
-	mutex_unlock(&sched->status_lock);
+	nvgpu_mutex_release(&sched->status_lock);
 
 	if (copy_to_user((void __user *)(uintptr_t)arg->buffer,
 		bitmap, sched->bitmap_size))
@@ -283,9 +283,9 @@ static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched)
 {
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");
 
-	mutex_lock(&sched->control_lock);
+	nvgpu_mutex_acquire(&sched->control_lock);
 	sched->control_locked = true;
-	mutex_unlock(&sched->control_lock);
+	nvgpu_mutex_release(&sched->control_lock);
 	return 0;
 }
 
@@ -293,9 +293,9 @@ static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched)
 {
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");
 
-	mutex_lock(&sched->control_lock);
+	nvgpu_mutex_acquire(&sched->control_lock);
 	sched->control_locked = false;
-	mutex_unlock(&sched->control_lock);
+	nvgpu_mutex_release(&sched->control_lock);
 	return 0;
 }
 
@@ -325,12 +325,12 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched,
 	if (!kref_get_unless_zero(&tsg->refcount))
 		return -ENXIO;
 
-	mutex_lock(&sched->status_lock);
+	nvgpu_mutex_acquire(&sched->status_lock);
 	if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
 		gk20a_warn(dev_from_gk20a(g),
 			"tsgid=%d already referenced", tsgid);
 		/* unlock status_lock as gk20a_tsg_release locks it */
-		mutex_unlock(&sched->status_lock);
+		nvgpu_mutex_release(&sched->status_lock);
 		kref_put(&tsg->refcount, gk20a_tsg_release);
 		return -ENXIO;
 	}
@@ -339,7 +339,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched,
 	 * NVGPU_SCHED_IOCTL_PUT_TSG ioctl, or close
 	 */
 	NVGPU_SCHED_SET(tsgid, sched->ref_tsg_bitmap);
-	mutex_unlock(&sched->status_lock);
+	nvgpu_mutex_release(&sched->status_lock);
 
 	return 0;
 }
@@ -357,15 +357,15 @@ static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched,
 	if (tsgid >= f->num_channels)
 		return -EINVAL;
 
-	mutex_lock(&sched->status_lock);
+	nvgpu_mutex_acquire(&sched->status_lock);
 	if (!NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
-		mutex_unlock(&sched->status_lock);
+		nvgpu_mutex_release(&sched->status_lock);
 		gk20a_warn(dev_from_gk20a(g),
 			"tsgid=%d not previously referenced", tsgid);
 		return -ENXIO;
 	}
 	NVGPU_SCHED_CLR(tsgid, sched->ref_tsg_bitmap);
-	mutex_unlock(&sched->status_lock);
+	nvgpu_mutex_release(&sched->status_lock);
 
 	tsg = &f->tsg[tsgid];
 	kref_put(&tsg->refcount, gk20a_tsg_release);
@@ -390,7 +390,7 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
 		gk20a_idle(g->dev);
 	}
 
-	if (!mutex_trylock(&sched->busy_lock))
+	if (!nvgpu_mutex_tryacquire(&sched->busy_lock))
 		return -EBUSY;
 
 	memcpy(sched->recent_tsg_bitmap, sched->active_tsg_bitmap,
@@ -506,11 +506,11 @@ int gk20a_sched_dev_release(struct inode *inode, struct file *filp)
 	}
 
 	/* unlock control */
-	mutex_lock(&sched->control_lock);
+	nvgpu_mutex_acquire(&sched->control_lock);
 	sched->control_locked = false;
-	mutex_unlock(&sched->control_lock);
+	nvgpu_mutex_release(&sched->control_lock);
 
-	mutex_unlock(&sched->busy_lock);
+	nvgpu_mutex_release(&sched->busy_lock);
 	return 0;
 }
 
@@ -530,16 +530,16 @@ static int gk20a_sched_debugfs_show(struct seq_file *s, void *unused)
 	if (err)
 		return err;
 
-	if (mutex_trylock(&sched->busy_lock)) {
+	if (nvgpu_mutex_tryacquire(&sched->busy_lock)) {
 		sched_busy = false;
-		mutex_unlock(&sched->busy_lock);
+		nvgpu_mutex_release(&sched->busy_lock);
 	}
 
 	seq_printf(s, "control_locked=%d\n", sched->control_locked);
 	seq_printf(s, "busy=%d\n", sched_busy);
 	seq_printf(s, "bitmap_size=%zu\n", sched->bitmap_size);
 
-	mutex_lock(&sched->status_lock);
+	nvgpu_mutex_acquire(&sched->status_lock);
 
 	seq_puts(s, "active_tsg_bitmap\n");
 	for (i = 0; i < n; i++)
@@ -549,7 +549,7 @@ static int gk20a_sched_debugfs_show(struct seq_file *s, void *unused)
 	for (i = 0; i < n; i++)
 		seq_printf(s, "\t0x%016llx\n", sched->recent_tsg_bitmap[i]);
 
-	mutex_unlock(&sched->status_lock);
+	nvgpu_mutex_release(&sched->status_lock);
 
 	gk20a_idle(g->dev);
 
@@ -594,11 +594,11 @@ void gk20a_sched_ctrl_tsg_added(struct gk20a *g, struct tsg_gk20a *tsg)
 		gk20a_idle(g->dev);
 	}
 
-	mutex_lock(&sched->status_lock);
+	nvgpu_mutex_acquire(&sched->status_lock);
 	NVGPU_SCHED_SET(tsg->tsgid, sched->active_tsg_bitmap);
 	NVGPU_SCHED_SET(tsg->tsgid, sched->recent_tsg_bitmap);
 	sched->status |= NVGPU_SCHED_STATUS_TSG_OPEN;
-	mutex_unlock(&sched->status_lock);
+	nvgpu_mutex_release(&sched->status_lock);
 	wake_up_interruptible(&sched->readout_wq);
 }
 
@@ -608,7 +608,7 @@ void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg)
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
 
-	mutex_lock(&sched->status_lock);
+	nvgpu_mutex_acquire(&sched->status_lock);
 	NVGPU_SCHED_CLR(tsg->tsgid, sched->active_tsg_bitmap);
 
 	/* clear recent_tsg_bitmap as well: if app manager did not
@@ -621,7 +621,7 @@ void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg)
 	/* do not set event_pending, we only want to notify app manager
 	 * when TSGs are added, so that it can apply sched params
 	 */
-	mutex_unlock(&sched->status_lock);
+	nvgpu_mutex_release(&sched->status_lock);
 }
 
 int gk20a_sched_ctrl_init(struct gk20a *g)
@@ -652,9 +652,9 @@ int gk20a_sched_ctrl_init(struct gk20a *g)
 		goto free_recent;
 
 	init_waitqueue_head(&sched->readout_wq);
-	mutex_init(&sched->status_lock);
-	mutex_init(&sched->control_lock);
-	mutex_init(&sched->busy_lock);
+	nvgpu_mutex_init(&sched->status_lock);
+	nvgpu_mutex_init(&sched->control_lock);
+	nvgpu_mutex_init(&sched->busy_lock);
 
 	sched->sw_ready = true;
 
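
One pattern preserved by the conversion is worth noting: gk20a_sched_dev_read() drops status_lock before blocking in wait_event_interruptible() and re-takes it after waking, so gk20a_sched_ctrl_tsg_added() can acquire the lock, set sched->status, and signal readout_wq without deadlocking against a sleeping reader. Condensed from the first hunk above:

	nvgpu_mutex_acquire(&sched->status_lock);
	while (!sched->status) {
		/* never sleep holding the lock */
		nvgpu_mutex_release(&sched->status_lock);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		err = wait_event_interruptible(sched->readout_wq, sched->status);
		if (err)
			return err;
		nvgpu_mutex_acquire(&sched->status_lock);
	}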