Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/ce2_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/ce2_gk20a.c	66
1 file changed, 4 insertions(+), 62 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index 9ff6c792..04ee84f4 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -103,54 +103,6 @@ int gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
 }
 
 /* static CE app api */
-static void gk20a_ce_notify_all_user(struct gk20a *g, u32 event)
-{
-	struct gk20a_ce_app *ce_app = &g->ce_app;
-	struct gk20a_gpu_ctx *ce_ctx, *ce_ctx_save;
-
-	if (!ce_app->initialised)
-		return;
-
-	nvgpu_mutex_acquire(&ce_app->app_mutex);
-
-	nvgpu_list_for_each_entry_safe(ce_ctx, ce_ctx_save,
-			&ce_app->allocated_contexts, gk20a_gpu_ctx, list) {
-		if (ce_ctx->user_event_callback) {
-			ce_ctx->user_event_callback(ce_ctx->ctx_id,
-				event);
-		}
-	}
-
-	nvgpu_mutex_release(&ce_app->app_mutex);
-}
-
-static void gk20a_ce_finished_ctx_cb(struct channel_gk20a *ch, void *data)
-{
-	struct gk20a_gpu_ctx *ce_ctx = data;
-	bool channel_idle;
-	u32 event;
-
-	channel_gk20a_joblist_lock(ch);
-	channel_idle = channel_gk20a_joblist_is_empty(ch);
-	channel_gk20a_joblist_unlock(ch);
-
-	if (!channel_idle)
-		return;
-
-	gk20a_dbg(gpu_dbg_fn, "ce: finished %p", ce_ctx);
-
-	if (ch->has_timedout)
-		event = NVGPU_CE_CONTEXT_JOB_TIMEDOUT;
-	else
-		event = NVGPU_CE_CONTEXT_JOB_COMPLETED;
-
-	if (ce_ctx->user_event_callback)
-		ce_ctx->user_event_callback(ce_ctx->ctx_id,
-			event);
-
-	++ce_ctx->completed_seq_number;
-}
-
 static void gk20a_ce_free_command_buffer_stored_fence(struct gk20a_gpu_ctx *ce_ctx)
 {
 	u32 cmd_buf_index;
@@ -410,7 +362,6 @@ int gk20a_init_ce_support(struct gk20a *g)
 	if (ce_app->initialised) {
 		/* assume this happen during poweron/poweroff GPU sequence */
 		ce_app->app_state = NVGPU_CE_ACTIVE;
-		gk20a_ce_notify_all_user(g, NVGPU_CE_CONTEXT_RESUME);
 		return 0;
 	}
 
@@ -469,18 +420,16 @@ void gk20a_ce_suspend(struct gk20a *g)
 		return;
 
 	ce_app->app_state = NVGPU_CE_SUSPEND;
-	gk20a_ce_notify_all_user(g, NVGPU_CE_CONTEXT_SUSPEND);
 
 	return;
 }
 
 /* CE app utility functions */
-u32 gk20a_ce_create_context_with_cb(struct gk20a *g,
+u32 gk20a_ce_create_context(struct gk20a *g,
 		int runlist_id,
 		int priority,
 		int timeslice,
-		int runlist_level,
-		ce_event_callback user_event_callback)
+		int runlist_level)
 {
 	struct gk20a_gpu_ctx *ce_ctx;
 	struct gk20a_ce_app *ce_app = &g->ce_app;
@@ -501,15 +450,11 @@ u32 gk20a_ce_create_context_with_cb(struct gk20a *g,
 	}
 
 	ce_ctx->g = g;
-	ce_ctx->user_event_callback = user_event_callback;
 
 	ce_ctx->cmd_buf_read_queue_offset = 0;
 	ce_ctx->cmd_buf_end_queue_offset =
 		(NVGPU_CE_COMMAND_BUF_SIZE / NVGPU_CE_MAX_COMMAND_BUFF_SIZE_PER_KICKOFF);
 
-	ce_ctx->submitted_seq_number = 0;
-	ce_ctx->completed_seq_number = 0;
-
 	ce_ctx->vm = g->mm.ce.vm;
 
 	if (nvgpu_is_enabled(g, NVGPU_MM_CE_TSG_REQUIRED)) {
@@ -523,10 +468,7 @@ u32 gk20a_ce_create_context_with_cb(struct gk20a *g,
 	}
 
 	/* always kernel client needs privileged channel */
-	ce_ctx->ch = gk20a_open_new_channel_with_cb(g, gk20a_ce_finished_ctx_cb,
-			ce_ctx,
-			runlist_id,
-			true);
+	ce_ctx->ch = gk20a_open_new_channel(g, runlist_id, true);
 	if (!ce_ctx->ch) {
 		nvgpu_err(g, "ce: gk20a channel not available");
 		goto end;
@@ -613,7 +555,7 @@ end:
 	return ctx_id;
 
 }
-EXPORT_SYMBOL(gk20a_ce_create_context_with_cb);
+EXPORT_SYMBOL(gk20a_ce_create_context);
 
 void gk20a_ce_delete_context(struct gk20a *g,
 		u32 ce_ctx_id)
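Note on caller migration: this change removes the ce_event_callback plumbing from the exported API, so in-kernel users switch from gk20a_ce_create_context_with_cb() to gk20a_ce_create_context() and drop the final argument; the NVGPU_CE_CONTEXT_JOB_COMPLETED, JOB_TIMEDOUT, SUSPEND, and RESUME notifications are no longer delivered. A minimal before/after sketch follows; the runlist_id variable, the -1 "use default" argument values, and my_ce_event_cb are illustrative assumptions, not taken from this commit.

	/* Before: callback-based variant (my_ce_event_cb is hypothetical). */
	ctx_id = gk20a_ce_create_context_with_cb(g, runlist_id,
			-1 /* priority: assumed default */,
			-1 /* timeslice: assumed default */,
			-1 /* runlist_level: assumed default */,
			my_ce_event_cb);

	/* After: same call without the callback; job completion is instead
	 * observed through the fences the context stores per command buffer
	 * (see gk20a_ce_free_command_buffer_stored_fence() above). */
	ctx_id = gk20a_ce_create_context(g, runlist_id,
			-1 /* priority: assumed default */,
			-1 /* timeslice: assumed default */,
			-1 /* runlist_level: assumed default */);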