author		Deepak Nibade <dnibade@nvidia.com>	2017-01-24 08:30:42 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-02-22 07:15:02 -0500
commit		8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 (patch)
tree		505dfd2ea2aca2f1cbdb254baee980862d21e04d /drivers/gpu/nvgpu/gm20b/acr_gm20b.c
parent		1f855af63fdd31fe3dcfee75f4f5f9b62f30d87e (diff)
gpu: nvgpu: use common nvgpu mutex/spinlock APIs
Instead of using Linux APIs for mutexes and spinlocks directly, use the new APIs defined in <nvgpu/lock.h>.

Replace Linux specific mutex/spinlock declaration, init, lock and unlock APIs with the new APIs, e.g. struct mutex is replaced by struct nvgpu_mutex and mutex_lock() is replaced by nvgpu_mutex_acquire().

Also include <nvgpu/lock.h> instead of <linux/mutex.h> and <linux/spinlock.h>.

Add explicit nvgpu/lock.h includes to the files below to fix compilation failures:
gk20a/platform_gk20a.h
include/nvgpu/allocator.h

Jira NVGPU-13

Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1293187
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
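For context, a minimal sketch of the conversion pattern the message describes. Only struct nvgpu_mutex, nvgpu_mutex_acquire() and nvgpu_mutex_release() are exercised by the diff below; example_state, example_init() and the int return from nvgpu_mutex_init() are illustrative assumptions.

#include <nvgpu/lock.h>

struct example_state {
	struct nvgpu_mutex isr_mutex;	/* was: struct mutex isr_mutex; */
	bool isr_enabled;
};

/* Assumed init helper from <nvgpu/lock.h>; only acquire/release appear in this diff. */
static int example_init(struct example_state *s)
{
	return nvgpu_mutex_init(&s->isr_mutex);	/* was: mutex_init(&s->isr_mutex); */
}

static void example_set_isr(struct example_state *s, bool enable)
{
	nvgpu_mutex_acquire(&s->isr_mutex);	/* was: mutex_lock() */
	s->isr_enabled = enable;
	nvgpu_mutex_release(&s->isr_mutex);	/* was: mutex_unlock() */
}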
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/acr_gm20b.c')
-rw-r--r--	drivers/gpu/nvgpu/gm20b/acr_gm20b.c	16
1 file changed, 8 insertions, 8 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 40a28136..c1cefc29 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -77,10 +77,10 @@ static get_ucode_details pmu_acr_supp_ucode_list[] = {
 static void start_gm20b_pmu(struct gk20a *g)
 {
 	/*disable irqs for hs falcon booting as we will poll for halt*/
-	mutex_lock(&g->pmu.isr_mutex);
+	nvgpu_mutex_acquire(&g->pmu.isr_mutex);
 	pmu_enable_irq(&g->pmu, true);
 	g->pmu.isr_enabled = true;
-	mutex_unlock(&g->pmu.isr_mutex);
+	nvgpu_mutex_release(&g->pmu.isr_mutex);
 	gk20a_writel(g, pwr_falcon_cpuctl_alias_r(),
 		pwr_falcon_cpuctl_startcpu_f(1));
 }
@@ -1282,10 +1282,10 @@ int gm20b_init_nspmu_setup_hw1(struct gk20a *g)
 
 	gk20a_dbg_fn("");
 
-	mutex_lock(&pmu->isr_mutex);
+	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	pmu_reset(pmu);
 	pmu->isr_enabled = true;
-	mutex_unlock(&pmu->isr_mutex);
+	nvgpu_mutex_release(&pmu->isr_mutex);
 
 	/* setup apertures - virtual */
 	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
@@ -1318,10 +1318,10 @@ static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 
 	gk20a_dbg_fn("");
 
-	mutex_lock(&pmu->isr_mutex);
+	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	g->ops.pmu.reset(g);
 	pmu->isr_enabled = true;
-	mutex_unlock(&pmu->isr_mutex);
+	nvgpu_mutex_release(&pmu->isr_mutex);
 
 	/* setup apertures - virtual */
 	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
@@ -1353,10 +1353,10 @@ static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
 		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
 	/*disable irqs for hs falcon booting as we will poll for halt*/
-	mutex_lock(&pmu->isr_mutex);
+	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	pmu_enable_irq(pmu, false);
 	pmu->isr_enabled = false;
-	mutex_unlock(&pmu->isr_mutex);
+	nvgpu_mutex_release(&pmu->isr_mutex);
 	/*Clearing mailbox register used to reflect capabilities*/
 	gk20a_writel(g, pwr_falcon_mailbox1_r(), 0);
 	err = bl_bootstrap(pmu, desc, bl_sz);