From 8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 Mon Sep 17 00:00:00 2001 From: Deepak Nibade Date: Tue, 24 Jan 2017 19:00:42 +0530 Subject: gpu: nvgpu: use common nvgpu mutex/spinlock APIs Instead of using Linux APIs for mutex and spinlocks directly, use new APIs defined in <nvgpu/lock.h>. Replace Linux specific mutex/spinlock declaration, init, lock, unlock APIs with new APIs e.g struct mutex is replaced by struct nvgpu_mutex and mutex_lock() is replaced by nvgpu_mutex_acquire() And also include <nvgpu/lock.h> instead of including <linux/mutex.h> and <linux/spinlock.h>. Add explicit nvgpu/lock.h includes to below files to fix compilation failures. gk20a/platform_gk20a.h include/nvgpu/allocator.h Jira NVGPU-13 Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33 Signed-off-by: Deepak Nibade Reviewed-on: http://git-master/r/1293187 Reviewed-by: mobile promotions Tested-by: mobile promotions --- drivers/gpu/nvgpu/gk20a/sched_gk20a.c | 80 +++++++++++++++++------------------ 1 file changed, 40 insertions(+), 40 deletions(-) (limited to 'drivers/gpu/nvgpu/gk20a/sched_gk20a.c') diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c index 20cd1232..6fdc2774 100644 --- a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c @@ -46,29 +46,29 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf, return -EINVAL; size = sizeof(event); - mutex_lock(&sched->status_lock); + nvgpu_mutex_acquire(&sched->status_lock); while (!sched->status) { - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; err = wait_event_interruptible(sched->readout_wq, sched->status); if (err) return err; - mutex_lock(&sched->status_lock); + nvgpu_mutex_acquire(&sched->status_lock); } event.reserved = 0; event.status = sched->status; if (copy_to_user(buf, &event, size)) { - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); return -EFAULT; } sched->status = 0; - 
mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); return size; } @@ -80,11 +80,11 @@ unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait) gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); - mutex_lock(&sched->status_lock); + nvgpu_mutex_acquire(&sched->status_lock); poll_wait(filp, &sched->readout_wq, wait); if (sched->status) mask |= POLLIN | POLLRDNORM; - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); return mask; } @@ -100,13 +100,13 @@ static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched, return -ENOSPC; } - mutex_lock(&sched->status_lock); + nvgpu_mutex_acquire(&sched->status_lock); if (copy_to_user((void __user *)(uintptr_t)arg->buffer, sched->active_tsg_bitmap, sched->bitmap_size)) { - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); return -EFAULT; } - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); return 0; } @@ -122,15 +122,15 @@ static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched, return -ENOSPC; } - mutex_lock(&sched->status_lock); + nvgpu_mutex_acquire(&sched->status_lock); if (copy_to_user((void __user *)(uintptr_t)arg->buffer, sched->recent_tsg_bitmap, sched->bitmap_size)) { - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); return -EFAULT; } memset(sched->recent_tsg_bitmap, 0, sched->bitmap_size); - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); return 0; } @@ -158,7 +158,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched, if (!bitmap) return -ENOMEM; - mutex_lock(&sched->status_lock); + nvgpu_mutex_acquire(&sched->status_lock); for (tsgid = 0; tsgid < f->num_channels; tsgid++) { if (NVGPU_SCHED_ISSET(tsgid, sched->active_tsg_bitmap)) { tsg = &f->tsg[tsgid]; @@ -166,7 +166,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched, 
NVGPU_SCHED_SET(tsgid, bitmap); } } - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); if (copy_to_user((void __user *)(uintptr_t)arg->buffer, bitmap, sched->bitmap_size)) @@ -283,9 +283,9 @@ static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched) { gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); - mutex_lock(&sched->control_lock); + nvgpu_mutex_acquire(&sched->control_lock); sched->control_locked = true; - mutex_unlock(&sched->control_lock); + nvgpu_mutex_release(&sched->control_lock); return 0; } @@ -293,9 +293,9 @@ static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched) { gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); - mutex_lock(&sched->control_lock); + nvgpu_mutex_acquire(&sched->control_lock); sched->control_locked = false; - mutex_unlock(&sched->control_lock); + nvgpu_mutex_release(&sched->control_lock); return 0; } @@ -325,12 +325,12 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched, if (!kref_get_unless_zero(&tsg->refcount)) return -ENXIO; - mutex_lock(&sched->status_lock); + nvgpu_mutex_acquire(&sched->status_lock); if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) { gk20a_warn(dev_from_gk20a(g), "tsgid=%d already referenced", tsgid); /* unlock status_lock as gk20a_tsg_release locks it */ - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); kref_put(&tsg->refcount, gk20a_tsg_release); return -ENXIO; } @@ -339,7 +339,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched, * NVGPU_SCHED_IOCTL_PUT_TSG ioctl, or close */ NVGPU_SCHED_SET(tsgid, sched->ref_tsg_bitmap); - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); return 0; } @@ -357,15 +357,15 @@ static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched, if (tsgid >= f->num_channels) return -EINVAL; - mutex_lock(&sched->status_lock); + nvgpu_mutex_acquire(&sched->status_lock); if (!NVGPU_SCHED_ISSET(tsgid, 
sched->ref_tsg_bitmap)) { - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); gk20a_warn(dev_from_gk20a(g), "tsgid=%d not previously referenced", tsgid); return -ENXIO; } NVGPU_SCHED_CLR(tsgid, sched->ref_tsg_bitmap); - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); tsg = &f->tsg[tsgid]; kref_put(&tsg->refcount, gk20a_tsg_release); @@ -390,7 +390,7 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp) gk20a_idle(g->dev); } - if (!mutex_trylock(&sched->busy_lock)) + if (!nvgpu_mutex_tryacquire(&sched->busy_lock)) return -EBUSY; memcpy(sched->recent_tsg_bitmap, sched->active_tsg_bitmap, @@ -506,11 +506,11 @@ int gk20a_sched_dev_release(struct inode *inode, struct file *filp) } /* unlock control */ - mutex_lock(&sched->control_lock); + nvgpu_mutex_acquire(&sched->control_lock); sched->control_locked = false; - mutex_unlock(&sched->control_lock); + nvgpu_mutex_release(&sched->control_lock); - mutex_unlock(&sched->busy_lock); + nvgpu_mutex_release(&sched->busy_lock); return 0; } @@ -530,16 +530,16 @@ static int gk20a_sched_debugfs_show(struct seq_file *s, void *unused) if (err) return err; - if (mutex_trylock(&sched->busy_lock)) { + if (nvgpu_mutex_tryacquire(&sched->busy_lock)) { sched_busy = false; - mutex_unlock(&sched->busy_lock); + nvgpu_mutex_release(&sched->busy_lock); } seq_printf(s, "control_locked=%d\n", sched->control_locked); seq_printf(s, "busy=%d\n", sched_busy); seq_printf(s, "bitmap_size=%zu\n", sched->bitmap_size); - mutex_lock(&sched->status_lock); + nvgpu_mutex_acquire(&sched->status_lock); seq_puts(s, "active_tsg_bitmap\n"); for (i = 0; i < n; i++) @@ -549,7 +549,7 @@ static int gk20a_sched_debugfs_show(struct seq_file *s, void *unused) for (i = 0; i < n; i++) seq_printf(s, "\t0x%016llx\n", sched->recent_tsg_bitmap[i]); - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); gk20a_idle(g->dev); @@ -594,11 +594,11 @@ void 
gk20a_sched_ctrl_tsg_added(struct gk20a *g, struct tsg_gk20a *tsg) gk20a_idle(g->dev); } - mutex_lock(&sched->status_lock); + nvgpu_mutex_acquire(&sched->status_lock); NVGPU_SCHED_SET(tsg->tsgid, sched->active_tsg_bitmap); NVGPU_SCHED_SET(tsg->tsgid, sched->recent_tsg_bitmap); sched->status |= NVGPU_SCHED_STATUS_TSG_OPEN; - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); wake_up_interruptible(&sched->readout_wq); } @@ -608,7 +608,7 @@ void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg) gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); - mutex_lock(&sched->status_lock); + nvgpu_mutex_acquire(&sched->status_lock); NVGPU_SCHED_CLR(tsg->tsgid, sched->active_tsg_bitmap); /* clear recent_tsg_bitmap as well: if app manager did not @@ -621,7 +621,7 @@ void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg) /* do not set event_pending, we only want to notify app manager * when TSGs are added, so that it can apply sched params */ - mutex_unlock(&sched->status_lock); + nvgpu_mutex_release(&sched->status_lock); } int gk20a_sched_ctrl_init(struct gk20a *g) @@ -652,9 +652,9 @@ int gk20a_sched_ctrl_init(struct gk20a *g) goto free_recent; init_waitqueue_head(&sched->readout_wq); - mutex_init(&sched->status_lock); - mutex_init(&sched->control_lock); - mutex_init(&sched->busy_lock); + nvgpu_mutex_init(&sched->status_lock); + nvgpu_mutex_init(&sched->control_lock); + nvgpu_mutex_init(&sched->busy_lock); sched->sw_ready = true; -- cgit v1.2.2