From 760f8dd7fbbfaee7607d10c38fe013f9cb069cd7 Mon Sep 17 00:00:00 2001
From: Konsta Holtta
Date: Tue, 31 Oct 2017 13:07:08 +0200
Subject: gpu: nvgpu: drop user callback support in CE

Simplify the copyengine code by deleting support for the
ce_event_callback feature, which has never been used. Similarly, create
the CE channel without the finish callback to get rid of that Linux
dependency, and delete the finish callback function as it now serves no
purpose. Also delete the submitted_seq_number and completed_seq_number
fields, which are only ever written to.

Jira NVGPU-259

Change-Id: I02d15bdcb546f4dd8895a6bfb5130caf88a104e2
Signed-off-by: Konsta Holtta
Reviewed-on: https://git-master.nvidia.com/r/1589320
Reviewed-by: svc-mobile-coverity
GVS: Gerrit_Virtual_Submit
Reviewed-by: Deepak Nibade
Reviewed-by: Alex Waterman
Reviewed-by: Terje Bergstrom
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/linux/ce2.c |  1 -
 drivers/gpu/nvgpu/common/mm/mm.c     |  5 ++-
 drivers/gpu/nvgpu/gk20a/ce2_gk20a.c  | 66 +++---------------------------------
 drivers/gpu/nvgpu/gk20a/ce2_gk20a.h  | 19 ++---------
 4 files changed, 8 insertions(+), 83 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/linux/ce2.c b/drivers/gpu/nvgpu/common/linux/ce2.c
index 3fee23e5..5127a32b 100644
--- a/drivers/gpu/nvgpu/common/linux/ce2.c
+++ b/drivers/gpu/nvgpu/common/linux/ce2.c
@@ -173,7 +173,6 @@ int gk20a_ce_execute_ops(struct gk20a *g,
 
 			/* Next available command buffer queue Index */
 			++ce_ctx->cmd_buf_read_queue_offset;
-			++ce_ctx->submitted_seq_number;
 		}
 	} else {
 		ret = -ENOMEM;
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index cb7c443d..73af31c9 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -259,12 +259,11 @@ void nvgpu_init_mm_ce_context(struct gk20a *g)
 #if defined(CONFIG_GK20A_VIDMEM)
 	if (g->mm.vidmem.size && (g->mm.vidmem.ce_ctx_id == (u32)~0)) {
 		g->mm.vidmem.ce_ctx_id =
-			gk20a_ce_create_context_with_cb(g,
+			gk20a_ce_create_context(g,
 					gk20a_fifo_get_fast_ce_runlist_id(g),
 					-1,
 					-1,
-					-1,
-					NULL);
+					-1);
 
 		if (g->mm.vidmem.ce_ctx_id == (u32)~0)
 			nvgpu_err(g,
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index 9ff6c792..04ee84f4 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -103,54 +103,6 @@ int gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
 }
 
 /* static CE app api */
-static void gk20a_ce_notify_all_user(struct gk20a *g, u32 event)
-{
-	struct gk20a_ce_app *ce_app = &g->ce_app;
-	struct gk20a_gpu_ctx *ce_ctx, *ce_ctx_save;
-
-	if (!ce_app->initialised)
-		return;
-
-	nvgpu_mutex_acquire(&ce_app->app_mutex);
-
-	nvgpu_list_for_each_entry_safe(ce_ctx, ce_ctx_save,
-			&ce_app->allocated_contexts, gk20a_gpu_ctx, list) {
-		if (ce_ctx->user_event_callback) {
-			ce_ctx->user_event_callback(ce_ctx->ctx_id,
-				event);
-		}
-	}
-
-	nvgpu_mutex_release(&ce_app->app_mutex);
-}
-
-static void gk20a_ce_finished_ctx_cb(struct channel_gk20a *ch, void *data)
-{
-	struct gk20a_gpu_ctx *ce_ctx = data;
-	bool channel_idle;
-	u32 event;
-
-	channel_gk20a_joblist_lock(ch);
-	channel_idle = channel_gk20a_joblist_is_empty(ch);
-	channel_gk20a_joblist_unlock(ch);
-
-	if (!channel_idle)
-		return;
-
-	gk20a_dbg(gpu_dbg_fn, "ce: finished %p", ce_ctx);
-
-	if (ch->has_timedout)
-		event = NVGPU_CE_CONTEXT_JOB_TIMEDOUT;
-	else
-		event = NVGPU_CE_CONTEXT_JOB_COMPLETED;
-
-	if (ce_ctx->user_event_callback)
-		ce_ctx->user_event_callback(ce_ctx->ctx_id,
-			event);
-
-	++ce_ctx->completed_seq_number;
-}
-
 static void gk20a_ce_free_command_buffer_stored_fence(struct gk20a_gpu_ctx *ce_ctx)
 {
 	u32 cmd_buf_index;
@@ -410,7 +362,6 @@ int gk20a_init_ce_support(struct gk20a *g)
 	if (ce_app->initialised) {
 		/* assume this happen during poweron/poweroff GPU sequence */
 		ce_app->app_state = NVGPU_CE_ACTIVE;
-		gk20a_ce_notify_all_user(g, NVGPU_CE_CONTEXT_RESUME);
 		return 0;
 	}
 
@@ -469,18 +420,16 @@ void gk20a_ce_suspend(struct gk20a *g)
 		return;
 
 	ce_app->app_state = NVGPU_CE_SUSPEND;
-	gk20a_ce_notify_all_user(g, NVGPU_CE_CONTEXT_SUSPEND);
 
 	return;
 }
 
 /* CE app utility functions */
-u32 gk20a_ce_create_context_with_cb(struct gk20a *g,
+u32 gk20a_ce_create_context(struct gk20a *g,
 		int runlist_id,
 		int priority,
 		int timeslice,
-		int runlist_level,
-		ce_event_callback user_event_callback)
+		int runlist_level)
 {
 	struct gk20a_gpu_ctx *ce_ctx;
 	struct gk20a_ce_app *ce_app = &g->ce_app;
@@ -501,15 +450,11 @@ u32 gk20a_ce_create_context_with_cb(struct gk20a *g,
 	}
 
 	ce_ctx->g = g;
-	ce_ctx->user_event_callback = user_event_callback;
 
 	ce_ctx->cmd_buf_read_queue_offset = 0;
 	ce_ctx->cmd_buf_end_queue_offset =
 		(NVGPU_CE_COMMAND_BUF_SIZE / NVGPU_CE_MAX_COMMAND_BUFF_SIZE_PER_KICKOFF);
 
-	ce_ctx->submitted_seq_number = 0;
-	ce_ctx->completed_seq_number = 0;
-
 	ce_ctx->vm = g->mm.ce.vm;
 
 	if (nvgpu_is_enabled(g, NVGPU_MM_CE_TSG_REQUIRED)) {
@@ -523,10 +468,7 @@ u32 gk20a_ce_create_context_with_cb(struct gk20a *g,
 	}
 
 	/* always kernel client needs privileged channel */
-	ce_ctx->ch = gk20a_open_new_channel_with_cb(g, gk20a_ce_finished_ctx_cb,
-					ce_ctx,
-					runlist_id,
-					true);
+	ce_ctx->ch = gk20a_open_new_channel(g, runlist_id, true);
 	if (!ce_ctx->ch) {
 		nvgpu_err(g, "ce: gk20a channel not available");
 		goto end;
 	}
@@ -613,7 +555,7 @@ end:
 
 	return ctx_id;
 }
-EXPORT_SYMBOL(gk20a_ce_create_context_with_cb);
+EXPORT_SYMBOL(gk20a_ce_create_context);
 
 void gk20a_ce_delete_context(struct gk20a *g,
 		u32 ce_ctx_id)
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.h b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.h
index 8d3a4ca3..5c3a1cfc 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.h
@@ -40,8 +40,6 @@ int gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base);
 #define NVGPU_CE_MAX_COMMAND_BUFF_SIZE_PER_KICKOFF	256
 #define NVGPU_CE_MAX_COMMAND_BUFF_SIZE_FOR_TRACING	8
 
-typedef void (*ce_event_callback)(u32 ce_ctx_id, u32 ce_event_flag);
-
 /* dma launch_flags */
 enum {
 	/* location */
@@ -69,14 +67,6 @@ enum {
 	NVGPU_CE_MEMSET = (1 << 1),
 };
 
-/* CE event flags */
-enum {
-	NVGPU_CE_CONTEXT_JOB_COMPLETED = (1 << 0),
-	NVGPU_CE_CONTEXT_JOB_TIMEDOUT = (1 << 1),
-	NVGPU_CE_CONTEXT_SUSPEND = (1 << 2),
-	NVGPU_CE_CONTEXT_RESUME = (1 << 3),
-};
-
 /* CE app state machine flags */
 enum {
 	NVGPU_CE_ACTIVE = (1 << 0),
@@ -106,7 +96,6 @@ struct gk20a_gpu_ctx {
 	u32 ctx_id;
 	struct nvgpu_mutex gpu_ctx_mutex;
 	int gpu_ctx_state;
-	ce_event_callback user_event_callback;
 
 	/* tsg related data */
 	struct tsg_gk20a *tsg;
@@ -120,9 +109,6 @@ struct gk20a_gpu_ctx {
 
 	struct nvgpu_list_node list;
 
-	u64 submitted_seq_number;
-	u64 completed_seq_number;
-
 	u32 cmd_buf_read_queue_offset;
 	u32 cmd_buf_end_queue_offset;
 };
@@ -140,12 +126,11 @@ void gk20a_ce_suspend(struct gk20a *g);
 void gk20a_ce_destroy(struct gk20a *g);
 
 /* CE app utility functions */
-u32 gk20a_ce_create_context_with_cb(struct gk20a *g,
+u32 gk20a_ce_create_context(struct gk20a *g,
 		int runlist_id,
 		int priority,
 		int timeslice,
-		int runlist_level,
-		ce_event_callback user_event_callback);
+		int runlist_level);
 int gk20a_ce_execute_ops(struct gk20a *g,
 		u32 ce_ctx_id,
 		u64 src_buf,
--
cgit v1.2.2
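
For reference, the mm.c hunk above is the only in-tree call site affected by the rename. Below is a minimal sketch of that migration, assuming the nvgpu declarations that common/mm/mm.c already pulls in; the wrapper function name is hypothetical and exists only to make the snippet self-contained.

/*
 * Sketch only, not part of this change: the call-site migration performed
 * by the mm.c hunk, shown in isolation. struct gk20a, g->mm.vidmem.ce_ctx_id,
 * gk20a_ce_create_context() and gk20a_fifo_get_fast_ce_runlist_id() come from
 * the nvgpu headers that common/mm/mm.c already includes.
 *
 * Old call (the callback argument was always NULL in-tree):
 *
 *	gk20a_ce_create_context_with_cb(g,
 *			gk20a_fifo_get_fast_ce_runlist_id(g),
 *			-1, -1, -1, NULL);
 */
static void example_vidmem_ce_ctx_setup(struct gk20a *g)
{
	/* New call: same first five arguments, callback parameter dropped. */
	g->mm.vidmem.ce_ctx_id =
		gk20a_ce_create_context(g,
				gk20a_fifo_get_fast_ce_runlist_id(g),
				-1,	/* priority */
				-1,	/* timeslice */
				-1);	/* runlist_level */

	/* A (u32)~0 context id still means no CE context could be created. */
}

The (u32)~0 sentinel check that follows the call in nvgpu_init_mm_ce_context() is unchanged by this patch.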