path: root/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
author	Deepak Nibade <dnibade@nvidia.com>	2017-01-24 08:30:42 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-02-22 07:15:02 -0500
commit	8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 (patch)
tree	505dfd2ea2aca2f1cbdb254baee980862d21e04d /drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
parent	1f855af63fdd31fe3dcfee75f4f5f9b62f30d87e (diff)
gpu: nvgpu: use common nvgpu mutex/spinlock APIs
Instead of using the Linux APIs for mutexes and spinlocks directly, use the new APIs defined in <nvgpu/lock.h>.

Replace the Linux-specific mutex/spinlock declaration, init, lock, and unlock APIs with the new APIs, e.g. struct mutex is replaced by struct nvgpu_mutex, and mutex_lock() is replaced by nvgpu_mutex_acquire(). Also include <nvgpu/lock.h> instead of <linux/mutex.h> and <linux/spinlock.h>.

Add explicit <nvgpu/lock.h> includes to the files below to fix compilation failures:
  gk20a/platform_gk20a.h
  include/nvgpu/allocator.h

Jira NVGPU-13

Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1293187
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
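As context for the diff below, a minimal sketch of the conversion pattern the message describes. Only struct nvgpu_mutex, nvgpu_mutex_acquire(), and <nvgpu/lock.h> are named in the message; the unlock counterpart nvgpu_mutex_release() is inferred from the acquire/release naming visible in the raw-spinlock hunks below, and my_state/my_update are hypothetical names used only for illustration.

    #include <nvgpu/lock.h>                 /* was: #include <linux/mutex.h> */

    /* Hypothetical driver state, not from the nvgpu sources. */
    struct my_state {
            struct nvgpu_mutex lock;        /* was: struct mutex lock; */
            int refcount;
    };

    static void my_update(struct my_state *s)
    {
            nvgpu_mutex_acquire(&s->lock);  /* was: mutex_lock(&s->lock); */
            s->refcount++;
            /* nvgpu_mutex_release() is assumed here from the
             * acquire/release pairing shown for the raw spinlock. */
            nvgpu_mutex_release(&s->lock);  /* was: mutex_unlock(&s->lock); */
    }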
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index 18971b09..097635a7 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -414,9 +414,9 @@ struct wait_fence_work {
 static void gk20a_add_pending_sema_wait(struct gk20a *g,
 				struct wait_fence_work *work)
 {
-	raw_spin_lock(&g->pending_sema_waits_lock);
+	nvgpu_raw_spinlock_acquire(&g->pending_sema_waits_lock);
 	list_add(&work->entry, &g->pending_sema_waits);
-	raw_spin_unlock(&g->pending_sema_waits_lock);
+	nvgpu_raw_spinlock_release(&g->pending_sema_waits_lock);
 }
 
 /*
@@ -426,9 +426,9 @@ static void gk20a_add_pending_sema_wait(struct gk20a *g,
 static void gk20a_start_sema_wait_cancel(struct gk20a *g,
 				struct list_head *list)
 {
-	raw_spin_lock(&g->pending_sema_waits_lock);
+	nvgpu_raw_spinlock_acquire(&g->pending_sema_waits_lock);
 	list_replace_init(&g->pending_sema_waits, list);
-	raw_spin_unlock(&g->pending_sema_waits_lock);
+	nvgpu_raw_spinlock_release(&g->pending_sema_waits_lock);
 }
 
 /*
@@ -486,10 +486,10 @@ static void gk20a_channel_semaphore_launcher(
 	 * This spinlock must protect a _very_ small critical section -
 	 * otherwise it's possible that the deterministic submit path suffers.
 	 */
-	raw_spin_lock(&g->pending_sema_waits_lock);
+	nvgpu_raw_spinlock_acquire(&g->pending_sema_waits_lock);
 	if (!list_empty(&g->pending_sema_waits))
 		list_del_init(&w->entry);
-	raw_spin_unlock(&g->pending_sema_waits_lock);
+	nvgpu_raw_spinlock_release(&g->pending_sema_waits_lock);
 
 	gk20a_dbg_info("waiting for pre fence %p '%s'",
 		       fence, fence->name);
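The comment in the last hunk notes that this spinlock must protect a very small critical section so the deterministic submit path is not delayed. The gk20a_start_sema_wait_cancel() hunk shows the standard way to achieve that: splice the whole shared list onto a local list while holding the lock (list_replace_init()), then do the per-entry work with the lock dropped. Below is a standalone userspace analogue of that pattern, not driver code; it uses a POSIX spinlock and a hand-rolled singly linked list, and the names pending_wait, add_pending_wait, and cancel_all_waits are illustrative. Compile with: cc -pthread sketch.c

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pending_wait {
            int id;
            struct pending_wait *next;
    };

    static pthread_spinlock_t pending_lock;
    static struct pending_wait *pending_head;   /* shared pending list */

    static void add_pending_wait(struct pending_wait *w)
    {
            /* O(1) insert under the lock, like gk20a_add_pending_sema_wait(). */
            pthread_spin_lock(&pending_lock);
            w->next = pending_head;
            pending_head = w;
            pthread_spin_unlock(&pending_lock);
    }

    static void cancel_all_waits(void)
    {
            struct pending_wait *local, *next;

            /* The critical section is just a pointer swap, mirroring
             * list_replace_init() in the hunk above. */
            pthread_spin_lock(&pending_lock);
            local = pending_head;
            pending_head = NULL;
            pthread_spin_unlock(&pending_lock);

            /* Potentially slow per-entry work runs with the lock dropped,
             * so concurrent submitters are never held up by it. */
            for (; local; local = next) {
                    next = local->next;
                    printf("cancelling wait %d\n", local->id);
                    free(local);
            }
    }

    int main(void)
    {
            pthread_spin_init(&pending_lock, PTHREAD_PROCESS_PRIVATE);
            for (int i = 0; i < 3; i++) {
                    struct pending_wait *w = malloc(sizeof(*w));
                    w->id = i;
                    add_pending_wait(w);
            }
            cancel_all_waits();
            pthread_spin_destroy(&pending_lock);
            return 0;
    }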