summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gp106/sec2_gp106.c
diff options
context:
space:
mode:
authorDeepak Nibade <dnibade@nvidia.com>2017-01-24 08:30:42 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2017-02-22 07:15:02 -0500
commit8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 (patch)
tree505dfd2ea2aca2f1cbdb254baee980862d21e04d /drivers/gpu/nvgpu/gp106/sec2_gp106.c
parent1f855af63fdd31fe3dcfee75f4f5f9b62f30d87e (diff)
gpu: nvgpu: use common nvgpu mutex/spinlock APIs
Instead of using Linux APIs for mutex and spinlocks directly, use new APIs defined in <nvgpu/lock.h> Replace Linux specific mutex/spinlock declaration, init, lock, unlock APIs with new APIs e.g struct mutex is replaced by struct nvgpu_mutex and mutex_lock() is replaced by nvgpu_mutex_acquire() And also include <nvgpu/lock.h> instead of including <linux/mutex.h> and <linux/spinlock.h> Add explicit nvgpu/lock.h includes to below files to fix complilation failures. gk20a/platform_gk20a.h include/nvgpu/allocator.h Jira NVGPU-13 Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33 Signed-off-by: Deepak Nibade <dnibade@nvidia.com> Reviewed-on: http://git-master/r/1293187 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gp106/sec2_gp106.c')
-rw-r--r--drivers/gpu/nvgpu/gp106/sec2_gp106.c12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
index 9af16886..51e76605 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
@@ -114,7 +114,7 @@ void sec2_copy_to_dmem(struct pmu_gk20a *pmu,
114 return; 114 return;
115 } 115 }
116 116
117 mutex_lock(&pmu->pmu_copy_lock); 117 nvgpu_mutex_acquire(&pmu->pmu_copy_lock);
118 118
119 words = size >> 2; 119 words = size >> 2;
120 bytes = size & 0x3; 120 bytes = size & 0x3;
@@ -144,7 +144,7 @@ void sec2_copy_to_dmem(struct pmu_gk20a *pmu,
144 "copy failed. bytes written %d, expected %d", 144 "copy failed. bytes written %d, expected %d",
145 data - dst, size); 145 data - dst, size);
146 } 146 }
147 mutex_unlock(&pmu->pmu_copy_lock); 147 nvgpu_mutex_release(&pmu->pmu_copy_lock);
148 return; 148 return;
149} 149}
150 150
@@ -348,10 +348,10 @@ int init_sec2_setup_hw1(struct gk20a *g,
348 348
349 gk20a_dbg_fn(""); 349 gk20a_dbg_fn("");
350 350
351 mutex_lock(&pmu->isr_mutex); 351 nvgpu_mutex_acquire(&pmu->isr_mutex);
352 g->ops.pmu.reset(g); 352 g->ops.pmu.reset(g);
353 pmu->isr_enabled = true; 353 pmu->isr_enabled = true;
354 mutex_unlock(&pmu->isr_mutex); 354 nvgpu_mutex_release(&pmu->isr_mutex);
355 355
356 data = gk20a_readl(g, psec_fbif_ctl_r()); 356 data = gk20a_readl(g, psec_fbif_ctl_r());
357 data |= psec_fbif_ctl_allow_phys_no_ctx_allow_f(); 357 data |= psec_fbif_ctl_allow_phys_no_ctx_allow_f();
@@ -379,11 +379,11 @@ int init_sec2_setup_hw1(struct gk20a *g,
379 psec_fbif_transcfg_target_noncoherent_sysmem_f()); 379 psec_fbif_transcfg_target_noncoherent_sysmem_f());
380 380
381 /*disable irqs for hs falcon booting as we will poll for halt*/ 381 /*disable irqs for hs falcon booting as we will poll for halt*/
382 mutex_lock(&pmu->isr_mutex); 382 nvgpu_mutex_acquire(&pmu->isr_mutex);
383 pmu_enable_irq(pmu, false); 383 pmu_enable_irq(pmu, false);
384 sec_enable_irq(pmu, false); 384 sec_enable_irq(pmu, false);
385 pmu->isr_enabled = false; 385 pmu->isr_enabled = false;
386 mutex_unlock(&pmu->isr_mutex); 386 nvgpu_mutex_release(&pmu->isr_mutex);
387 err = bl_bootstrap_sec2(pmu, desc, bl_sz); 387 err = bl_bootstrap_sec2(pmu, desc, bl_sz);
388 if (err) 388 if (err)
389 return err; 389 return err;