summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorAingara Paramakuru <aparamakuru@nvidia.com>2016-08-10 20:03:49 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2016-09-13 13:13:41 -0400
commitc0dd9ea9c85295207774de5637dc0c581ff8ee6e (patch)
tree1e9b669741a413b46b7eeaf870a0039311077ac3 /drivers
parent7ff4a760a83aaff0c214a5564530a0f32de40a84 (diff)
gpu: nvgpu: use spinlock for ch timeout lock
The channel timeout lock guards a very small critical section. Use a spinlock instead of a mutex for performance.

Bug 1795076

Change-Id: I94940f3fbe84ed539bcf1bc76ca6ae7a0ef2fe13
Signed-off-by: Aingara Paramakuru <aparamakuru@nvidia.com>
Reviewed-on: http://git-master/r/1200803
(cherry picked from commit 4fa9e973da141067be145d9eba2ea74e96869dcd)
Reviewed-on: http://git-master/r/1203799
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.c30
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.h2
2 files changed, 16 insertions, 16 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index d23a8026..4c03f955 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -1660,35 +1660,35 @@ static void gk20a_channel_timeout_start(struct channel_gk20a *ch,
1660 if (!ch->wdt_enabled) 1660 if (!ch->wdt_enabled)
1661 return; 1661 return;
1662 1662
1663 mutex_lock(&ch->timeout.lock); 1663 raw_spin_lock(&ch->timeout.lock);
1664 1664
1665 if (ch->timeout.initialized) { 1665 if (ch->timeout.initialized) {
1666 mutex_unlock(&ch->timeout.lock); 1666 raw_spin_unlock(&ch->timeout.lock);
1667 return; 1667 return;
1668 } 1668 }
1669 1669
1670 ch->timeout.job = job; 1670 ch->timeout.job = job;
1671 ch->timeout.initialized = true; 1671 ch->timeout.initialized = true;
1672 raw_spin_unlock(&ch->timeout.lock);
1673
1672 schedule_delayed_work(&ch->timeout.wq, 1674 schedule_delayed_work(&ch->timeout.wq,
1673 msecs_to_jiffies(gk20a_get_channel_watchdog_timeout(ch))); 1675 msecs_to_jiffies(gk20a_get_channel_watchdog_timeout(ch)));
1674
1675 mutex_unlock(&ch->timeout.lock);
1676} 1676}
1677 1677
1678static void gk20a_channel_timeout_stop(struct channel_gk20a *ch) 1678static void gk20a_channel_timeout_stop(struct channel_gk20a *ch)
1679{ 1679{
1680 mutex_lock(&ch->timeout.lock); 1680 raw_spin_lock(&ch->timeout.lock);
1681 if (!ch->timeout.initialized) { 1681 if (!ch->timeout.initialized) {
1682 mutex_unlock(&ch->timeout.lock); 1682 raw_spin_unlock(&ch->timeout.lock);
1683 return; 1683 return;
1684 } 1684 }
1685 mutex_unlock(&ch->timeout.lock); 1685 raw_spin_unlock(&ch->timeout.lock);
1686 1686
1687 cancel_delayed_work_sync(&ch->timeout.wq); 1687 cancel_delayed_work_sync(&ch->timeout.wq);
1688 1688
1689 mutex_lock(&ch->timeout.lock); 1689 raw_spin_lock(&ch->timeout.lock);
1690 ch->timeout.initialized = false; 1690 ch->timeout.initialized = false;
1691 mutex_unlock(&ch->timeout.lock); 1691 raw_spin_unlock(&ch->timeout.lock);
1692} 1692}
1693 1693
1694void gk20a_channel_timeout_restart_all_channels(struct gk20a *g) 1694void gk20a_channel_timeout_restart_all_channels(struct gk20a *g)
@@ -1700,13 +1700,13 @@ void gk20a_channel_timeout_restart_all_channels(struct gk20a *g)
1700 struct channel_gk20a *ch = &f->channel[chid]; 1700 struct channel_gk20a *ch = &f->channel[chid];
1701 1701
1702 if (gk20a_channel_get(ch)) { 1702 if (gk20a_channel_get(ch)) {
1703 mutex_lock(&ch->timeout.lock); 1703 raw_spin_lock(&ch->timeout.lock);
1704 if (!ch->timeout.initialized) { 1704 if (!ch->timeout.initialized) {
1705 mutex_unlock(&ch->timeout.lock); 1705 raw_spin_unlock(&ch->timeout.lock);
1706 gk20a_channel_put(ch); 1706 gk20a_channel_put(ch);
1707 continue; 1707 continue;
1708 } 1708 }
1709 mutex_unlock(&ch->timeout.lock); 1709 raw_spin_unlock(&ch->timeout.lock);
1710 1710
1711 cancel_delayed_work_sync(&ch->timeout.wq); 1711 cancel_delayed_work_sync(&ch->timeout.wq);
1712 if (!ch->has_timedout) 1712 if (!ch->has_timedout)
@@ -1740,10 +1740,10 @@ static void gk20a_channel_timeout_handler(struct work_struct *work)
1740 ch->hw_chid); 1740 ch->hw_chid);
1741 1741
1742 /* Get timed out job and reset the timer */ 1742 /* Get timed out job and reset the timer */
1743 mutex_lock(&ch->timeout.lock); 1743 raw_spin_lock(&ch->timeout.lock);
1744 job = ch->timeout.job; 1744 job = ch->timeout.job;
1745 ch->timeout.initialized = false; 1745 ch->timeout.initialized = false;
1746 mutex_unlock(&ch->timeout.lock); 1746 raw_spin_unlock(&ch->timeout.lock);
1747 1747
1748 if (gk20a_fence_is_expired(job->post_fence)) { 1748 if (gk20a_fence_is_expired(job->post_fence)) {
1749 gk20a_err(dev_from_gk20a(g), 1749 gk20a_err(dev_from_gk20a(g),
@@ -2333,7 +2333,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
2333 mutex_init(&c->ioctl_lock); 2333 mutex_init(&c->ioctl_lock);
2334 spin_lock_init(&c->jobs_lock); 2334 spin_lock_init(&c->jobs_lock);
2335 mutex_init(&c->last_submit.fence_lock); 2335 mutex_init(&c->last_submit.fence_lock);
2336 mutex_init(&c->timeout.lock); 2336 raw_spin_lock_init(&c->timeout.lock);
2337 mutex_init(&c->sync_lock); 2337 mutex_init(&c->sync_lock);
2338 INIT_DELAYED_WORK(&c->timeout.wq, gk20a_channel_timeout_handler); 2338 INIT_DELAYED_WORK(&c->timeout.wq, gk20a_channel_timeout_handler);
2339 INIT_DELAYED_WORK(&c->clean_up.wq, gk20a_channel_clean_up_jobs); 2339 INIT_DELAYED_WORK(&c->clean_up.wq, gk20a_channel_clean_up_jobs);
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index 4b2019dd..5ad23bab 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -72,7 +72,7 @@ struct channel_gk20a_job {
72 72
73struct channel_gk20a_timeout { 73struct channel_gk20a_timeout {
74 struct delayed_work wq; 74 struct delayed_work wq;
75 struct mutex lock; 75 raw_spinlock_t lock;
76 bool initialized; 76 bool initialized;
77 struct channel_gk20a_job *job; 77 struct channel_gk20a_job *job;
78}; 78};