path: root/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
author: Deepak Nibade <dnibade@nvidia.com> 2017-01-24 08:30:42 -0500
committer: mobile promotions <svcmobile_promotions@nvidia.com> 2017-02-22 07:15:02 -0500
commit: 8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 (patch)
tree: 505dfd2ea2aca2f1cbdb254baee980862d21e04d /drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
parent: 1f855af63fdd31fe3dcfee75f4f5f9b62f30d87e (diff)
gpu: nvgpu: use common nvgpu mutex/spinlock APIs
Instead of using the Linux APIs for mutexes and spinlocks directly, use the new APIs defined in <nvgpu/lock.h>.

Replace the Linux-specific mutex/spinlock declaration, init, lock, and unlock APIs with the new APIs, e.g. struct mutex is replaced by struct nvgpu_mutex and mutex_lock() is replaced by nvgpu_mutex_acquire().

Also include <nvgpu/lock.h> instead of <linux/mutex.h> and <linux/spinlock.h>.

Add explicit nvgpu/lock.h includes to the files below to fix compilation failures:
gk20a/platform_gk20a.h
include/nvgpu/allocator.h

Jira NVGPU-13

Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1293187
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
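A minimal before/after sketch of the conversion pattern follows. Only the nvgpu_mutex_* names that appear in this patch are taken from the change itself; the struct foo container and foo_do_work() function are hypothetical, for illustration only.

/* Before: Linux mutex API */
#include <linux/mutex.h>

struct foo {
	struct mutex lock;	/* protects the critical section below */
};

static void foo_do_work(struct foo *f)
{
	mutex_init(&f->lock);
	mutex_lock(&f->lock);
	/* ... critical section ... */
	mutex_unlock(&f->lock);
}

/* After: common nvgpu lock API from <nvgpu/lock.h> */
#include <nvgpu/lock.h>

struct foo {
	struct nvgpu_mutex lock;	/* same role, OS-independent type */
};

static void foo_do_work(struct foo *f)
{
	nvgpu_mutex_init(&f->lock);
	nvgpu_mutex_acquire(&f->lock);
	/* ... critical section ... */
	nvgpu_mutex_release(&f->lock);
}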
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/fifo_vgpu.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/fifo_vgpu.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 339f2237..7b6ed322 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -1,7 +1,7 @@
 /*
  * Virtualized GPU Fifo
  *
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -218,7 +218,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 			goto clean_up_runlist;
 		}
 	}
-	mutex_init(&runlist->mutex);
+	nvgpu_mutex_init(&runlist->mutex);
 
 	/* None of buffers is pinned if this value doesn't change.
 	   Otherwise, one of them (cur_buffer) must have been pinned. */
@@ -294,7 +294,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 	init_runlist(g, f);
 
 	INIT_LIST_HEAD(&f->free_chs);
-	mutex_init(&f->free_chs_mutex);
+	nvgpu_mutex_init(&f->free_chs_mutex);
 
 	for (chid = 0; chid < f->num_channels; chid++) {
 		f->channel[chid].userd_iova =
@@ -306,10 +306,10 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 		gk20a_init_channel_support(g, chid);
 		gk20a_init_tsg_support(g, chid);
 	}
-	mutex_init(&f->tsg_inuse_mutex);
+	nvgpu_mutex_init(&f->tsg_inuse_mutex);
 
 	f->deferred_reset_pending = false;
-	mutex_init(&f->deferred_reset_mutex);
+	nvgpu_mutex_init(&f->deferred_reset_mutex);
 
 	f->sw_ready = true;
 
@@ -534,12 +534,12 @@ static int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
 
 	runlist = &f->runlist_info[runlist_id];
 
-	mutex_lock(&runlist->mutex);
+	nvgpu_mutex_acquire(&runlist->mutex);
 
 	ret = vgpu_fifo_update_runlist_locked(g, runlist_id, hw_chid, add,
 					      wait_for_finish);
 
-	mutex_unlock(&runlist->mutex);
+	nvgpu_mutex_release(&runlist->mutex);
 	return ret;
 }
 
@@ -679,7 +679,7 @@ static int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
 static void vgpu_fifo_set_ctx_mmu_error(struct gk20a *g,
 		struct channel_gk20a *ch)
 {
-	mutex_lock(&ch->error_notifier_mutex);
+	nvgpu_mutex_acquire(&ch->error_notifier_mutex);
 	if (ch->error_notifier_ref) {
 		if (ch->error_notifier->status == 0xffff) {
 			/* If error code is already set, this mmu fault
@@ -691,7 +691,7 @@ static void vgpu_fifo_set_ctx_mmu_error(struct gk20a *g,
 				NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
 		}
 	}
-	mutex_unlock(&ch->error_notifier_mutex);
+	nvgpu_mutex_release(&ch->error_notifier_mutex);
 
 	/* mark channel as faulted */
 	ch->has_timedout = true;