From c0dd9ea9c85295207774de5637dc0c581ff8ee6e Mon Sep 17 00:00:00 2001
From: Aingara Paramakuru
Date: Wed, 10 Aug 2016 20:03:49 -0400
Subject: gpu: nvgpu: use spinlock for ch timeout lock

The channel timeout lock guards a very small critical section. Use a
spinlock instead of a mutex for performance.

Bug 1795076

Change-Id: I94940f3fbe84ed539bcf1bc76ca6ae7a0ef2fe13
Signed-off-by: Aingara Paramakuru
Reviewed-on: http://git-master/r/1200803
(cherry picked from commit 4fa9e973da141067be145d9eba2ea74e96869dcd)
Reviewed-on: http://git-master/r/1203799
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom
---
 drivers/gpu/nvgpu/gk20a/channel_gk20a.c | 30 +++++++++++++++---------------
 drivers/gpu/nvgpu/gk20a/channel_gk20a.h |  2 +-
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index d23a8026..4c03f955 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -1660,35 +1660,35 @@ static void gk20a_channel_timeout_start(struct channel_gk20a *ch,
 	if (!ch->wdt_enabled)
 		return;
 
-	mutex_lock(&ch->timeout.lock);
+	raw_spin_lock(&ch->timeout.lock);
 
 	if (ch->timeout.initialized) {
-		mutex_unlock(&ch->timeout.lock);
+		raw_spin_unlock(&ch->timeout.lock);
 		return;
 	}
 
 	ch->timeout.job = job;
 	ch->timeout.initialized = true;
+	raw_spin_unlock(&ch->timeout.lock);
+
 	schedule_delayed_work(&ch->timeout.wq,
 		msecs_to_jiffies(gk20a_get_channel_watchdog_timeout(ch)));
-
-	mutex_unlock(&ch->timeout.lock);
 }
 
 static void gk20a_channel_timeout_stop(struct channel_gk20a *ch)
 {
-	mutex_lock(&ch->timeout.lock);
+	raw_spin_lock(&ch->timeout.lock);
 
 	if (!ch->timeout.initialized) {
-		mutex_unlock(&ch->timeout.lock);
+		raw_spin_unlock(&ch->timeout.lock);
 		return;
 	}
 
-	mutex_unlock(&ch->timeout.lock);
+	raw_spin_unlock(&ch->timeout.lock);
 
 	cancel_delayed_work_sync(&ch->timeout.wq);
 
-	mutex_lock(&ch->timeout.lock);
+	raw_spin_lock(&ch->timeout.lock);
 	ch->timeout.initialized = false;
-	mutex_unlock(&ch->timeout.lock);
+	raw_spin_unlock(&ch->timeout.lock);
 }
 
 void gk20a_channel_timeout_restart_all_channels(struct gk20a *g)
@@ -1700,13 +1700,13 @@ void gk20a_channel_timeout_restart_all_channels(struct gk20a *g)
 		struct channel_gk20a *ch = &f->channel[chid];
 
 		if (gk20a_channel_get(ch)) {
-			mutex_lock(&ch->timeout.lock);
+			raw_spin_lock(&ch->timeout.lock);
 			if (!ch->timeout.initialized) {
-				mutex_unlock(&ch->timeout.lock);
+				raw_spin_unlock(&ch->timeout.lock);
 				gk20a_channel_put(ch);
 				continue;
 			}
-			mutex_unlock(&ch->timeout.lock);
+			raw_spin_unlock(&ch->timeout.lock);
 
 			cancel_delayed_work_sync(&ch->timeout.wq);
 			if (!ch->has_timedout)
@@ -1740,10 +1740,10 @@ static void gk20a_channel_timeout_handler(struct work_struct *work)
 		ch->hw_chid);
 
 	/* Get timed out job and reset the timer */
-	mutex_lock(&ch->timeout.lock);
+	raw_spin_lock(&ch->timeout.lock);
 	job = ch->timeout.job;
 	ch->timeout.initialized = false;
-	mutex_unlock(&ch->timeout.lock);
+	raw_spin_unlock(&ch->timeout.lock);
 
 	if (gk20a_fence_is_expired(job->post_fence)) {
 		gk20a_err(dev_from_gk20a(g),
@@ -2333,7 +2333,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 	mutex_init(&c->ioctl_lock);
 	spin_lock_init(&c->jobs_lock);
 	mutex_init(&c->last_submit.fence_lock);
-	mutex_init(&c->timeout.lock);
+	raw_spin_lock_init(&c->timeout.lock);
 	mutex_init(&c->sync_lock);
 	INIT_DELAYED_WORK(&c->timeout.wq, gk20a_channel_timeout_handler);
 	INIT_DELAYED_WORK(&c->clean_up.wq,
 			gk20a_channel_clean_up_jobs);
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index 4b2019dd..5ad23bab 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -72,7 +72,7 @@ struct channel_gk20a_job {
 
 struct channel_gk20a_timeout {
 	struct delayed_work wq;
-	struct mutex lock;
+	raw_spinlock_t lock;
 	bool initialized;
 	struct channel_gk20a_job *job;
 };
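The pattern the patch applies is generic: when a lock protects only a couple of flag and pointer updates, a raw spinlock is cheaper than a mutex, and the operation that may sleep (cancel_delayed_work_sync()) must stay outside the critical section, while schedule_delayed_work() is moved out simply to keep the locked region tiny. Below is a minimal standalone sketch of that pattern; it is not taken from the nvgpu sources, and the "watchdog" struct and watchdog_* helpers are hypothetical names used only for illustration.

/*
 * Illustrative sketch only (kernel-module context assumed); the types
 * and helpers below are hypothetical, not nvgpu code.
 */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct watchdog {
	struct delayed_work wq;
	raw_spinlock_t lock;	/* guards only 'armed' and 'job' */
	bool armed;
	void *job;
};

static void watchdog_init(struct watchdog *wd, work_func_t fn)
{
	raw_spin_lock_init(&wd->lock);
	INIT_DELAYED_WORK(&wd->wq, fn);
	wd->armed = false;
	wd->job = NULL;
}

static void watchdog_start(struct watchdog *wd, void *job, unsigned int ms)
{
	raw_spin_lock(&wd->lock);
	if (wd->armed) {	/* already running: nothing to do */
		raw_spin_unlock(&wd->lock);
		return;
	}
	wd->job = job;
	wd->armed = true;
	raw_spin_unlock(&wd->lock);

	/* scheduling happens outside the lock to keep the section small */
	schedule_delayed_work(&wd->wq, msecs_to_jiffies(ms));
}

static void watchdog_stop(struct watchdog *wd)
{
	bool was_armed;

	raw_spin_lock(&wd->lock);
	was_armed = wd->armed;
	raw_spin_unlock(&wd->lock);
	if (!was_armed)
		return;

	/* may sleep, so it must never run under the raw spinlock */
	cancel_delayed_work_sync(&wd->wq);

	raw_spin_lock(&wd->lock);
	wd->armed = false;
	raw_spin_unlock(&wd->lock);
}

As in the patch, the lock/unlock pairs bracket only the state updates; cancel_delayed_work_sync() is called after the lock is dropped because sleeping while holding a raw spinlock is not allowed.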