author	Deepak Nibade <dnibade@nvidia.com>	2017-01-24 08:30:42 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-02-22 07:15:02 -0500
commit	8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 (patch)
tree	505dfd2ea2aca2f1cbdb254baee980862d21e04d /drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
parent	1f855af63fdd31fe3dcfee75f4f5f9b62f30d87e (diff)
gpu: nvgpu: use common nvgpu mutex/spinlock APIs
Instead of using Linux APIs for mutexes and spinlocks directly, use the new APIs defined in <nvgpu/lock.h>.

Replace the Linux-specific mutex/spinlock declaration, init, lock, and unlock APIs with the new APIs, e.g. struct mutex is replaced by struct nvgpu_mutex and mutex_lock() is replaced by nvgpu_mutex_acquire().

Also include <nvgpu/lock.h> instead of including <linux/mutex.h> and <linux/spinlock.h>.

Add explicit nvgpu/lock.h includes to the files below to fix compilation failures:
gk20a/platform_gk20a.h
include/nvgpu/allocator.h

Jira NVGPU-13

Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1293187
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
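As context for the hunks below, the substitution is purely mechanical. A minimal before/after sketch of the pattern follows; the struct my_dev and the function names are hypothetical illustrations, while struct nvgpu_mutex, nvgpu_mutex_acquire(), nvgpu_mutex_release(), and the <nvgpu/lock.h> header come from the change itself:

	/* Before: Linux locking APIs used directly. */
	#include <linux/mutex.h>

	struct my_dev {				/* hypothetical example struct */
		struct mutex dbg_sessions_lock;
	};

	static void do_locked_work(struct my_dev *d)
	{
		mutex_lock(&d->dbg_sessions_lock);
		/* ... touch state guarded by the lock ... */
		mutex_unlock(&d->dbg_sessions_lock);
	}

	/* After: the same file converted to the common nvgpu wrappers. */
	#include <nvgpu/lock.h>

	struct my_dev {
		struct nvgpu_mutex dbg_sessions_lock;
	};

	static void do_locked_work(struct my_dev *d)
	{
		nvgpu_mutex_acquire(&d->dbg_sessions_lock);
		/* ... touch state guarded by the lock ... */
		nvgpu_mutex_release(&d->dbg_sessions_lock);
	}

Routing all call sites through the common wrappers, rather than the Linux primitives, keeps the locking calls in shared driver code OS-agnostic, which is presumably the point of the common nvgpu API layer this change adopts.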
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c	28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
index 5c9baf77..351be55e 100644
--- a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
@@ -349,7 +349,7 @@ static int nvgpu_gpu_ioctl_inval_icache(
 	ops.offset = gr_pri_gpc0_gcc_dbg_r();
 
 	/* Take the global lock, since we'll be doing global regops */
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
 	err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 0, 1);
 
@@ -371,7 +371,7 @@ static int nvgpu_gpu_ioctl_inval_icache(
 	gk20a_writel(g, gr_pri_gpc0_tpc0_sm_cache_control_r(), cache_ctrl);
 
 end:
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 	return err;
 }
 
@@ -384,9 +384,9 @@ static int nvgpu_gpu_ioctl_set_mmu_debug_mode(
 		return -EINVAL;
 	}
 
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 	g->ops.mm.set_debug_mode(g, args->state == 1);
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 
 	gk20a_idle(g->dev);
 	return 0;
@@ -403,13 +403,13 @@ static int nvgpu_gpu_ioctl_set_debug_mode(
 	if (!ch)
 		return -EINVAL;
 
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 	if (g->ops.gr.set_sm_debug_mode)
 		err = g->ops.gr.set_sm_debug_mode(g, ch,
 				args->sms, !!args->enable);
 	else
 		err = -ENOSYS;
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 
 	return err;
 }
@@ -419,7 +419,7 @@ static int nvgpu_gpu_ioctl_trigger_suspend(struct gk20a *g)
 	int err = 0;
 	u32 dbgr_control0;
 
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 	/* assert stop trigger. uniformity assumption: all SMs will have
 	 * the same state in dbg_control0. */
 	dbgr_control0 =
@@ -430,7 +430,7 @@ static int nvgpu_gpu_ioctl_trigger_suspend(struct gk20a *g)
 	gk20a_writel(g,
 		gr_gpcs_tpcs_sm_dbgr_control0_r(), dbgr_control0);
 
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 	return err;
 }
 
@@ -456,7 +456,7 @@ static int nvgpu_gpu_ioctl_wait_for_pause(struct gk20a *g,
 		gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f() |
 		gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f();
 
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
 	/* Lock down all SMs */
 	for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) {
@@ -482,7 +482,7 @@ static int nvgpu_gpu_ioctl_wait_for_pause(struct gk20a *g,
 	}
 
 end:
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 	kfree(w_state);
 	return err;
 }
@@ -491,7 +491,7 @@ static int nvgpu_gpu_ioctl_resume_from_pause(struct gk20a *g)
 {
 	int err = 0;
 
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
 	/* Clear the pause mask to tell the GPU we want to resume everyone */
 	gk20a_writel(g,
@@ -505,7 +505,7 @@ static int nvgpu_gpu_ioctl_resume_from_pause(struct gk20a *g)
 	 * then a 1 to the run trigger */
 	gk20a_resume_all_sms(g);
 
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 	return err;
 }
 
@@ -551,7 +551,7 @@ static int nvgpu_gpu_ioctl_has_any_exception(
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
 
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
 	for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) {
 
@@ -565,7 +565,7 @@ static int nvgpu_gpu_ioctl_has_any_exception(
 		tpc_exception_en |= gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(regval) << sm_id;
 	}
 
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 	args->tpc_exception_en_sm_mask = tpc_exception_en;
 	return err;
 }