From 468d6888fc6db207cb239e270752f3f8f2f0ed87 Mon Sep 17 00:00:00 2001
From: Deepak Nibade
Date: Fri, 31 Mar 2017 15:56:25 +0530
Subject: gpu: nvgpu: use nvgpu list for CDE contexts

Use nvgpu list APIs instead of linux list APIs to store
CDE contexts in free_contexts/used_contexts lists

Jira NVGPU-13

Change-Id: If1c5d8d8ca70afc90379b33232ceccf9ac4fb155
Signed-off-by: Deepak Nibade
Reviewed-on: http://git-master/r/1454009
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom
---
 drivers/gpu/nvgpu/gk20a/cde_gk20a.c | 38 +++++++++++++++++++-------------------
 drivers/gpu/nvgpu/gk20a/cde_gk20a.h | 13 ++++++++++---
 2 files changed, 29 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index 02636206..42850588 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -85,7 +85,7 @@ __must_hold(&cde_app->mutex)
 	gk20a_channel_close(ch);
 
 	/* housekeeping on app */
-	list_del(&cde_ctx->list);
+	nvgpu_list_del(&cde_ctx->list);
 	cde_ctx->g->cde_app.ctx_count--;
 	nvgpu_kfree(g, cde_ctx);
 }
@@ -120,14 +120,14 @@ __must_hold(&cde_app->mutex)
 	 * deinitialised; no new jobs are started. deleter works may be only at
 	 * waiting for the mutex or before, going to abort */
 
-	list_for_each_entry_safe(cde_ctx, cde_ctx_save,
-			&cde_app->free_contexts, list) {
+	nvgpu_list_for_each_entry_safe(cde_ctx, cde_ctx_save,
+			&cde_app->free_contexts, gk20a_cde_ctx, list) {
 		gk20a_cde_cancel_deleter(cde_ctx, true);
 		gk20a_cde_remove_ctx(cde_ctx);
 	}
 
-	list_for_each_entry_safe(cde_ctx, cde_ctx_save,
-			&cde_app->used_contexts, list) {
+	nvgpu_list_for_each_entry_safe(cde_ctx, cde_ctx_save,
+			&cde_app->used_contexts, gk20a_cde_ctx, list) {
 		gk20a_cde_cancel_deleter(cde_ctx, true);
 		gk20a_cde_remove_ctx(cde_ctx);
 	}
@@ -172,13 +172,13 @@ __releases(&cde_app->mutex)
 
 	nvgpu_mutex_acquire(&cde_app->mutex);
 
-	list_for_each_entry_safe(cde_ctx, cde_ctx_save,
-			&cde_app->free_contexts, list) {
+	nvgpu_list_for_each_entry_safe(cde_ctx, cde_ctx_save,
+			&cde_app->free_contexts, gk20a_cde_ctx, list) {
 		gk20a_cde_cancel_deleter(cde_ctx, false);
 	}
 
-	list_for_each_entry_safe(cde_ctx, cde_ctx_save,
-			&cde_app->used_contexts, list) {
+	nvgpu_list_for_each_entry_safe(cde_ctx, cde_ctx_save,
+			&cde_app->used_contexts, gk20a_cde_ctx, list) {
 		gk20a_cde_cancel_deleter(cde_ctx, false);
 	}
 
@@ -196,7 +196,7 @@ __must_hold(&cde_app->mutex)
 	if (IS_ERR(cde_ctx))
 		return PTR_ERR(cde_ctx);
 
-	list_add(&cde_ctx->list, &cde_app->free_contexts);
+	nvgpu_list_add(&cde_ctx->list, &cde_app->free_contexts);
 	cde_app->ctx_count++;
 	if (cde_app->ctx_count > cde_app->ctx_count_top)
 		cde_app->ctx_count_top = cde_app->ctx_count;
@@ -747,7 +747,7 @@ __releases(&cde_app->mutex)
 
 	if (cde_ctx->in_use) {
 		cde_ctx->in_use = false;
-		list_move(&cde_ctx->list, &cde_app->free_contexts);
+		nvgpu_list_move(&cde_ctx->list, &cde_app->free_contexts);
 		cde_app->ctx_usecount--;
 	} else {
 		gk20a_dbg_info("double release cde context %p", cde_ctx);
@@ -819,9 +819,9 @@ __must_hold(&cde_app->mutex)
 
 	/* idle context available? */
 
-	if (!list_empty(&cde_app->free_contexts)) {
-		cde_ctx = list_first_entry(&cde_app->free_contexts,
-				struct gk20a_cde_ctx, list);
+	if (!nvgpu_list_empty(&cde_app->free_contexts)) {
+		cde_ctx = nvgpu_list_first_entry(&cde_app->free_contexts,
+				gk20a_cde_ctx, list);
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
 				"cde: got free %p count=%d use=%d max=%d",
 				cde_ctx, cde_app->ctx_count,
@@ -831,7 +831,7 @@ __must_hold(&cde_app->mutex)
 
 		/* deleter work may be scheduled, but in_use prevents it */
 		cde_ctx->in_use = true;
-		list_move(&cde_ctx->list, &cde_app->used_contexts);
+		nvgpu_list_move(&cde_ctx->list, &cde_app->used_contexts);
 		cde_app->ctx_usecount++;
 
 		/* cancel any deletions now that ctx is in use */
@@ -859,7 +859,7 @@ __must_hold(&cde_app->mutex)
 	cde_app->ctx_count++;
 	if (cde_app->ctx_count > cde_app->ctx_count_top)
 		cde_app->ctx_count_top = cde_app->ctx_count;
-	list_add(&cde_ctx->list, &cde_app->used_contexts);
+	nvgpu_list_add(&cde_ctx->list, &cde_app->used_contexts);
 
 	return cde_ctx;
 }
@@ -907,7 +907,7 @@ static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct gk20a *g)
 		return ERR_PTR(ret);
 	}
 
-	INIT_LIST_HEAD(&cde_ctx->list);
+	nvgpu_init_list_node(&cde_ctx->list);
 	cde_ctx->is_temporary = false;
 	cde_ctx->in_use = false;
 	INIT_DELAYED_WORK(&cde_ctx->ctx_deleter_work,
@@ -1321,8 +1321,8 @@ __releases(&cde_app->mutex)
 
 	nvgpu_mutex_acquire(&cde_app->mutex);
 
-	INIT_LIST_HEAD(&cde_app->free_contexts);
-	INIT_LIST_HEAD(&cde_app->used_contexts);
+	nvgpu_init_list_node(&cde_app->free_contexts);
+	nvgpu_init_list_node(&cde_app->used_contexts);
 	cde_app->ctx_count = 0;
 	cde_app->ctx_count_top = 0;
 	cde_app->ctx_usecount = 0;
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.h b/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
index 1136b0ad..a36f2401 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
@@ -254,18 +254,25 @@ struct gk20a_cde_ctx {
 
 	bool init_cmd_executed;
 
-	struct list_head list;
+	struct nvgpu_list_node list;
 	bool is_temporary;
 	bool in_use;
 	struct delayed_work ctx_deleter_work;
 };
 
+static inline struct gk20a_cde_ctx *
+gk20a_cde_ctx_from_list(struct nvgpu_list_node *node)
+{
+	return (struct gk20a_cde_ctx *)
+		((uintptr_t)node - offsetof(struct gk20a_cde_ctx, list));
+};
+
 struct gk20a_cde_app {
 	bool initialised;
 	struct nvgpu_mutex mutex;
 
-	struct list_head free_contexts;
-	struct list_head used_contexts;
+	struct nvgpu_list_node free_contexts;
+	struct nvgpu_list_node used_contexts;
 	unsigned int ctx_count;
 	unsigned int ctx_usecount;
 	unsigned int ctx_count_top;
--
cgit v1.2.2
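
Note on the conversion: unlike the Linux <linux/list.h> helpers, the nvgpu list lookup and traversal macros take the containing type name (here gk20a_cde_ctx) as an argument and are assumed to resolve the owning object through a per-type <type>_from_<member>() function, which is what the new gk20a_cde_ctx_from_list() helper in cde_gk20a.h appears to provide. The sketch below is a minimal, self-contained illustration of that intrusive-list pattern; the ex_* names are invented for the example and are not the actual nvgpu API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative doubly-linked list node, embedded in the owning struct. */
struct ex_list_node {
	struct ex_list_node *prev, *next;
};

static inline void ex_init_list_node(struct ex_list_node *n)
{
	n->prev = n;
	n->next = n;
}

static inline void ex_list_add(struct ex_list_node *n, struct ex_list_node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static inline int ex_list_empty(struct ex_list_node *head)
{
	return head->next == head;
}

/* Owning object with an embedded node, in the style of struct gk20a_cde_ctx. */
struct ex_ctx {
	int id;
	struct ex_list_node list;
};

/*
 * Per-type container-of helper; the traversal macro below builds this call
 * from the type and member names, which is why the nvgpu-style macros take
 * the extra "type, member" arguments (e.g. "gk20a_cde_ctx, list").
 */
static inline struct ex_ctx *ex_ctx_from_list(struct ex_list_node *node)
{
	return (struct ex_ctx *)((uintptr_t)node - offsetof(struct ex_ctx, list));
}

#define ex_list_entry(ptr, type, member) type##_from_##member(ptr)

#define ex_list_for_each_entry(pos, head, type, member)			\
	for ((pos) = ex_list_entry((head)->next, type, member);	\
	     &(pos)->member != (head);					\
	     (pos) = ex_list_entry((pos)->member.next, type, member))

int main(void)
{
	struct ex_list_node free_contexts;
	struct ex_ctx a = { .id = 1 }, b = { .id = 2 };
	struct ex_ctx *pos;

	ex_init_list_node(&free_contexts);
	ex_list_add(&a.list, &free_contexts);
	ex_list_add(&b.list, &free_contexts);

	/* Walks the embedded nodes and recovers each owning ex_ctx. */
	ex_list_for_each_entry(pos, &free_contexts, ex_ctx, list)
		printf("ctx id=%d on free list\n", pos->id);

	printf("free list empty: %d\n", ex_list_empty(&free_contexts));
	return 0;
}

Running the sketch walks the embedded nodes and prints each owning context, mirroring how nvgpu_list_first_entry() and nvgpu_list_for_each_entry_safe() are used in the patch above.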