diff options
author | Konsta Holtta <kholtta@nvidia.com> | 2014-11-12 07:05:02 -0500 |
---|---|---|
committer | Dan Willemsen <dwillemsen@nvidia.com> | 2015-03-18 15:12:15 -0400 |
commit | cd072a192b0de73c512dfa3ca9ed4b5c25f10119 (patch) | |
tree | d178d051f98dae7f9352d211023aabfd696023e7 | |
parent | 1d9fba8804fb811771eac0f68f334f51f101ed01 (diff) |
gpu: nvgpu: cde: restrict context count
Add an upper limit for cde contexts, and wait for a while if a new
context is queried and the limit has been exceeded. This happens only
under very high load. If the timeout is exceeded, report -EAGAIN.
Change-Id: I1fa47ad6cddf620eae00cea16ecea36cf4151cab
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/601719
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/cde_gk20a.c | 32 |
1 file changed, 31 insertions, 1 deletion
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c index 65d6dd4d..c96225c7 100644 --- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c | |||
@@ -39,6 +39,9 @@ static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct gk20a *g); | |||
39 | 39 | ||
40 | #define CTX_DELETE_TIME 1000 | 40 | #define CTX_DELETE_TIME 1000 |
41 | 41 | ||
42 | #define MAX_CTX_USE_COUNT 42 | ||
43 | #define MAX_CTX_RETRY_TIME 2000 | ||
44 | |||
42 | static void gk20a_deinit_cde_img(struct gk20a_cde_ctx *cde_ctx) | 45 | static void gk20a_deinit_cde_img(struct gk20a_cde_ctx *cde_ctx) |
43 | { | 46 | { |
44 | struct device *dev = &cde_ctx->pdev->dev; | 47 | struct device *dev = &cde_ctx->pdev->dev; |
@@ -782,12 +785,17 @@ out: | |||
782 | gk20a_idle(pdev); | 785 | gk20a_idle(pdev); |
783 | } | 786 | } |
784 | 787 | ||
785 | static struct gk20a_cde_ctx *gk20a_cde_get_context(struct gk20a *g) | 788 | static struct gk20a_cde_ctx *gk20a_cde_do_get_context(struct gk20a *g) |
786 | __must_hold(&cde_app->mutex) | 789 | __must_hold(&cde_app->mutex) |
787 | { | 790 | { |
788 | struct gk20a_cde_app *cde_app = &g->cde_app; | 791 | struct gk20a_cde_app *cde_app = &g->cde_app; |
789 | struct gk20a_cde_ctx *cde_ctx; | 792 | struct gk20a_cde_ctx *cde_ctx; |
790 | 793 | ||
794 | /* exhausted? */ | ||
795 | |||
796 | if (cde_app->ctx_usecount >= MAX_CTX_USE_COUNT) | ||
797 | return ERR_PTR(-EAGAIN); | ||
798 | |||
791 | /* idle context available? */ | 799 | /* idle context available? */ |
792 | 800 | ||
793 | if (!list_empty(&cde_app->free_contexts)) { | 801 | if (!list_empty(&cde_app->free_contexts)) { |
@@ -834,6 +842,28 @@ __must_hold(&cde_app->mutex) | |||
834 | return cde_ctx; | 842 | return cde_ctx; |
835 | } | 843 | } |
836 | 844 | ||
845 | static struct gk20a_cde_ctx *gk20a_cde_get_context(struct gk20a *g) | ||
846 | __releases(&cde_app->mutex) | ||
847 | __acquires(&cde_app->mutex) | ||
848 | { | ||
849 | struct gk20a_cde_app *cde_app = &g->cde_app; | ||
850 | struct gk20a_cde_ctx *cde_ctx = NULL; | ||
851 | unsigned long end = jiffies + msecs_to_jiffies(MAX_CTX_RETRY_TIME); | ||
852 | |||
853 | do { | ||
854 | cde_ctx = gk20a_cde_do_get_context(g); | ||
855 | if (PTR_ERR(cde_ctx) != -EAGAIN) | ||
856 | break; | ||
857 | |||
858 | /* exhausted, retry */ | ||
859 | mutex_unlock(&cde_app->mutex); | ||
860 | cond_resched(); | ||
861 | mutex_lock(&cde_app->mutex); | ||
862 | } while (time_before(jiffies, end)); | ||
863 | |||
864 | return cde_ctx; | ||
865 | } | ||
866 | |||
837 | static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct gk20a *g) | 867 | static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct gk20a *g) |
838 | { | 868 | { |
839 | struct gk20a_cde_ctx *cde_ctx; | 869 | struct gk20a_cde_ctx *cde_ctx; |