author     Deepak Nibade <dnibade@nvidia.com>                    2017-01-24 08:30:42 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>   2017-02-22 07:15:02 -0500
commit     8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 (patch)
tree       505dfd2ea2aca2f1cbdb254baee980862d21e04d /drivers/gpu/nvgpu/gp10b/gr_gp10b.c
parent     1f855af63fdd31fe3dcfee75f4f5f9b62f30d87e (diff)
gpu: nvgpu: use common nvgpu mutex/spinlock APIs
Instead of using the Linux mutex and spinlock APIs directly, use the new APIs defined in <nvgpu/lock.h>.

Replace the Linux-specific mutex/spinlock declaration, init, lock, and unlock APIs with the new APIs, e.g. struct mutex is replaced by struct nvgpu_mutex and mutex_lock() is replaced by nvgpu_mutex_acquire(). Also include <nvgpu/lock.h> instead of <linux/mutex.h> and <linux/spinlock.h>.

Add explicit nvgpu/lock.h includes to the files below to fix compilation failures:
gk20a/platform_gk20a.h
include/nvgpu/allocator.h

Jira NVGPU-13

Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1293187
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
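As a quick illustration of the conversion described above, here is a minimal sketch of the before/after locking pattern. The struct and function names (example_state, example_update) are hypothetical; struct nvgpu_mutex, nvgpu_mutex_acquire() and nvgpu_mutex_release() are the APIs named in this change, and nvgpu_mutex_init() is assumed as the corresponding init call.

/* Minimal, illustrative sketch -- not code from this patch. */
#include <nvgpu/lock.h>	/* replaces <linux/mutex.h> and <linux/spinlock.h> */

struct example_state {
	struct nvgpu_mutex lock;	/* was: struct mutex lock; */
	int value;
};

/* At setup time, nvgpu_mutex_init(&s->lock) is assumed to replace mutex_init(). */

static void example_update(struct example_state *s, int v)
{
	nvgpu_mutex_acquire(&s->lock);	/* was: mutex_lock(&s->lock); */
	s->value = v;
	nvgpu_mutex_release(&s->lock);	/* was: mutex_unlock(&s->lock); */
}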
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/gr_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/gp10b/gr_gp10b.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index e680e753..da121b56 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -1997,16 +1997,16 @@ static int gr_gp10b_suspend_contexts(struct gk20a *g,
 	int local_ctx_resident_ch_fd = -1;
 	bool ctx_resident;
 
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
 	err = gr_gk20a_disable_ctxsw(g);
 	if (err) {
 		gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
-		mutex_unlock(&g->dbg_sessions_lock);
+		nvgpu_mutex_release(&g->dbg_sessions_lock);
 		goto clean_up;
 	}
 
-	mutex_lock(&dbg_s->ch_list_lock);
+	nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
 
 	list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) {
 		ch = g->fifo.channel + ch_data->chid;
@@ -2019,15 +2019,15 @@ static int gr_gp10b_suspend_contexts(struct gk20a *g,
 			cilp_preempt_pending_ch = ch;
 	}
 
-	mutex_unlock(&dbg_s->ch_list_lock);
+	nvgpu_mutex_release(&dbg_s->ch_list_lock);
 
 	err = gr_gk20a_enable_ctxsw(g);
 	if (err) {
-		mutex_unlock(&g->dbg_sessions_lock);
+		nvgpu_mutex_release(&g->dbg_sessions_lock);
 		goto clean_up;
 	}
 
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 
 	if (cilp_preempt_pending_ch) {
 		struct channel_ctx_gk20a *ch_ctx =