path: root/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
author	Deepak Nibade <dnibade@nvidia.com>	2017-01-24 08:30:42 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-02-22 07:15:02 -0500
commit	8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 (patch)
tree	505dfd2ea2aca2f1cbdb254baee980862d21e04d	/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
parent	1f855af63fdd31fe3dcfee75f4f5f9b62f30d87e (diff)
gpu: nvgpu: use common nvgpu mutex/spinlock APIs
Instead of using the Linux APIs for mutexes and spinlocks directly, use the new APIs defined in <nvgpu/lock.h>.

Replace the Linux-specific mutex/spinlock declaration, init, lock and unlock APIs with the new APIs, e.g. struct mutex is replaced by struct nvgpu_mutex and mutex_lock() is replaced by nvgpu_mutex_acquire(). Also include <nvgpu/lock.h> instead of <linux/mutex.h> and <linux/spinlock.h>.

Add explicit nvgpu/lock.h includes to the files below to fix compilation failures:
gk20a/platform_gk20a.h
include/nvgpu/allocator.h

Jira NVGPU-13

Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1293187
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
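For reference, a minimal sketch of the before/after locking pattern this patch applies. The example_ctx structure and helper functions are hypothetical; only struct nvgpu_mutex, <nvgpu/lock.h> and the nvgpu_mutex_* calls are the ones the patch actually switches to:

#include <nvgpu/lock.h>	/* replaces <linux/mutex.h> / <linux/spinlock.h> */

/* hypothetical structure embedding the new lock type */
struct example_ctx {
	struct nvgpu_mutex lock;	/* was: struct mutex lock; */
	int count;
};

static void example_init(struct example_ctx *ctx)
{
	nvgpu_mutex_init(&ctx->lock);		/* was: mutex_init() */
	ctx->count = 0;
}

static void example_bump(struct example_ctx *ctx)
{
	nvgpu_mutex_acquire(&ctx->lock);	/* was: mutex_lock() */
	ctx->count++;
	nvgpu_mutex_release(&ctx->lock);	/* was: mutex_unlock() */
}

static void example_destroy(struct example_ctx *ctx)
{
	nvgpu_mutex_destroy(&ctx->lock);	/* was: mutex_destroy() */
}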
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/ce2_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/ce2_gk20a.c	46
1 file changed, 23 insertions, 23 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index 023c959e..fd248313 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -107,7 +107,7 @@ static void gk20a_ce_notify_all_user(struct gk20a *g, u32 event)
 	if (!ce_app->initialised)
 		return;
 
-	mutex_lock(&ce_app->app_mutex);
+	nvgpu_mutex_acquire(&ce_app->app_mutex);
 
 	list_for_each_entry_safe(ce_ctx, ce_ctx_save,
 			&ce_app->allocated_contexts, list) {
@@ -117,7 +117,7 @@ static void gk20a_ce_notify_all_user(struct gk20a *g, u32 event)
 		}
 	}
 
-	mutex_unlock(&ce_app->app_mutex);
+	nvgpu_mutex_release(&ce_app->app_mutex);
 }
 
 static void gk20a_ce_finished_ctx_cb(struct channel_gk20a *ch, void *data)
@@ -183,14 +183,14 @@ static void gk20a_ce_free_command_buffer_stored_fence(struct gk20a_gpu_ctx *ce_c
 	}
 }
 
-/* assume this api should need to call under mutex_lock(&ce_app->app_mutex) */
+/* assume this api should need to call under nvgpu_mutex_acquire(&ce_app->app_mutex) */
 static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
 {
 	struct list_head *list = &ce_ctx->list;
 
 	ce_ctx->gpu_ctx_state = NVGPU_CE_GPU_CTX_DELETED;
 
-	mutex_lock(&ce_ctx->gpu_ctx_mutex);
+	nvgpu_mutex_acquire(&ce_ctx->gpu_ctx_mutex);
 
 	if (ce_ctx->cmd_buf_mem.cpu_va) {
 		gk20a_ce_free_command_buffer_stored_fence(ce_ctx);
@@ -205,8 +205,8 @@ static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
 	if (list->prev && list->next)
 		list_del(list);
 
-	mutex_unlock(&ce_ctx->gpu_ctx_mutex);
-	mutex_destroy(&ce_ctx->gpu_ctx_mutex);
+	nvgpu_mutex_release(&ce_ctx->gpu_ctx_mutex);
+	nvgpu_mutex_destroy(&ce_ctx->gpu_ctx_mutex);
 
 	kfree(ce_ctx);
 }
@@ -353,8 +353,8 @@ int gk20a_init_ce_support(struct gk20a *g)
 
 	gk20a_dbg(gpu_dbg_fn, "ce: init");
 
-	mutex_init(&ce_app->app_mutex);
-	mutex_lock(&ce_app->app_mutex);
+	nvgpu_mutex_init(&ce_app->app_mutex);
+	nvgpu_mutex_acquire(&ce_app->app_mutex);
 
 	INIT_LIST_HEAD(&ce_app->allocated_contexts);
 	ce_app->ctx_count = 0;
@@ -362,7 +362,7 @@ int gk20a_init_ce_support(struct gk20a *g)
 	ce_app->initialised = true;
 	ce_app->app_state = NVGPU_CE_ACTIVE;
 
-	mutex_unlock(&ce_app->app_mutex);
+	nvgpu_mutex_release(&ce_app->app_mutex);
 	gk20a_dbg(gpu_dbg_cde_ctx, "ce: init finished");
 
 	return 0;
@@ -379,7 +379,7 @@ void gk20a_ce_destroy(struct gk20a *g)
 	ce_app->app_state = NVGPU_CE_SUSPEND;
 	ce_app->initialised = false;
 
-	mutex_lock(&ce_app->app_mutex);
+	nvgpu_mutex_acquire(&ce_app->app_mutex);
 
 	list_for_each_entry_safe(ce_ctx, ce_ctx_save,
 			&ce_app->allocated_contexts, list) {
@@ -390,8 +390,8 @@ void gk20a_ce_destroy(struct gk20a *g)
 	ce_app->ctx_count = 0;
 	ce_app->next_ctx_id = 0;
 
-	mutex_unlock(&ce_app->app_mutex);
-	mutex_destroy(&ce_app->app_mutex);
+	nvgpu_mutex_release(&ce_app->app_mutex);
+	nvgpu_mutex_destroy(&ce_app->app_mutex);
 }
 
 void gk20a_ce_suspend(struct gk20a *g)
@@ -428,7 +428,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 	if (!ce_ctx)
 		return ctx_id;
 
-	mutex_init(&ce_ctx->gpu_ctx_mutex);
+	nvgpu_mutex_init(&ce_ctx->gpu_ctx_mutex);
 
 	ce_ctx->g = g;
 	ce_ctx->dev = g->dev;
@@ -508,20 +508,20 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 		}
 	}
 
-	mutex_lock(&ce_app->app_mutex);
+	nvgpu_mutex_acquire(&ce_app->app_mutex);
 	ctx_id = ce_ctx->ctx_id = ce_app->next_ctx_id;
 	list_add(&ce_ctx->list, &ce_app->allocated_contexts);
 	++ce_app->next_ctx_id;
 	++ce_app->ctx_count;
-	mutex_unlock(&ce_app->app_mutex);
+	nvgpu_mutex_release(&ce_app->app_mutex);
 
 	ce_ctx->gpu_ctx_state = NVGPU_CE_GPU_CTX_ALLOCATED;
 
 end:
 	if (ctx_id == (u32)~0) {
-		mutex_lock(&ce_app->app_mutex);
+		nvgpu_mutex_acquire(&ce_app->app_mutex);
 		gk20a_ce_delete_gpu_context(ce_ctx);
-		mutex_unlock(&ce_app->app_mutex);
+		nvgpu_mutex_release(&ce_app->app_mutex);
 	}
 	return ctx_id;
 
@@ -558,7 +558,7 @@ int gk20a_ce_execute_ops(struct device *dev,
 	if (!ce_app->initialised ||ce_app->app_state != NVGPU_CE_ACTIVE)
 		goto end;
 
-	mutex_lock(&ce_app->app_mutex);
+	nvgpu_mutex_acquire(&ce_app->app_mutex);
 
 	list_for_each_entry_safe(ce_ctx, ce_ctx_save,
 			&ce_app->allocated_contexts, list) {
@@ -568,7 +568,7 @@ int gk20a_ce_execute_ops(struct device *dev,
 		}
 	}
 
-	mutex_unlock(&ce_app->app_mutex);
+	nvgpu_mutex_release(&ce_app->app_mutex);
 
 	if (!found) {
 		ret = -EINVAL;
@@ -580,7 +580,7 @@ int gk20a_ce_execute_ops(struct device *dev,
 		goto end;
 	}
 
-	mutex_lock(&ce_ctx->gpu_ctx_mutex);
+	nvgpu_mutex_acquire(&ce_ctx->gpu_ctx_mutex);
 
 	ce_ctx->cmd_buf_read_queue_offset %= ce_ctx->cmd_buf_end_queue_offset;
 
@@ -672,7 +672,7 @@ int gk20a_ce_execute_ops(struct device *dev,
 	} else
 		ret = -ENOMEM;
 noop:
-	mutex_unlock(&ce_ctx->gpu_ctx_mutex);
+	nvgpu_mutex_release(&ce_ctx->gpu_ctx_mutex);
 end:
 	return ret;
 }
@@ -688,7 +688,7 @@ void gk20a_ce_delete_context(struct device *dev,
 	if (!ce_app->initialised ||ce_app->app_state != NVGPU_CE_ACTIVE)
 		return;
 
-	mutex_lock(&ce_app->app_mutex);
+	nvgpu_mutex_acquire(&ce_app->app_mutex);
 
 	list_for_each_entry_safe(ce_ctx, ce_ctx_save,
 			&ce_app->allocated_contexts, list) {
@@ -699,7 +699,7 @@ void gk20a_ce_delete_context(struct device *dev,
 		}
 	}
 
-	mutex_unlock(&ce_app->app_mutex);
+	nvgpu_mutex_release(&ce_app->app_mutex);
 	return;
 }
 EXPORT_SYMBOL(gk20a_ce_delete_context);