diff options
author | Konsta Holtta <kholtta@nvidia.com> | 2014-11-11 04:23:45 -0500 |
---|---|---|
committer | Dan Willemsen <dwillemsen@nvidia.com> | 2015-03-18 15:12:13 -0400 |
commit | 0e112b6b4b9c937fbeb1b71d28a43f4fadc6abb6 (patch) | |
tree | 4d28c9a28e4e6f5a471b6079a559e0f85bae8da0 /drivers/gpu | |
parent | 6a4597d5cd80ec47e62429cdf350f88a6707f448 (diff) |
gpu: nvgpu: cde: do not rearm deleter on failure
Rescheduling the temp context deleter when deletion is not immediately
possible is unnecessary, and complicates things. Don't do it.
The context would be reused later anyway when its turn comes in the free
list, and the deletion would then be retried.
This simplifies canceling the work items when shutting down or going into
suspend, since re-canceling possibly rescheduled work is no longer needed.
Releasing the app mutex is still necessary when deleting the whole cde.
Bug 200052943
Change-Id: I06afe1766097a78d7bcb93f3140855799ac903ca
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/601035
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/cde_gk20a.c | 41 |
1 files changed, 19 insertions, 22 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c index c4793335..c5368e46 100644 --- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c | |||
@@ -91,7 +91,8 @@ __must_hold(&cde_app->mutex) | |||
91 | kfree(cde_ctx); | 91 | kfree(cde_ctx); |
92 | } | 92 | } |
93 | 93 | ||
94 | static void gk20a_cde_cancel_deleter(struct gk20a_cde_ctx *cde_ctx) | 94 | static void gk20a_cde_cancel_deleter(struct gk20a_cde_ctx *cde_ctx, |
95 | bool wait_finish) | ||
95 | __releases(&cde_app->mutex) | 96 | __releases(&cde_app->mutex) |
96 | __acquires(&cde_app->mutex) | 97 | __acquires(&cde_app->mutex) |
97 | { | 98 | { |
@@ -101,14 +102,13 @@ __acquires(&cde_app->mutex) | |||
101 | if (!cde_ctx->is_temporary) | 102 | if (!cde_ctx->is_temporary) |
102 | return; | 103 | return; |
103 | 104 | ||
104 | mutex_unlock(&cde_app->mutex); | 105 | if (wait_finish) { |
105 | 106 | mutex_unlock(&cde_app->mutex); | |
106 | /* the deleter can rearm itself */ | ||
107 | while (delayed_work_pending(&cde_ctx->ctx_deleter_work)) { | ||
108 | cancel_delayed_work_sync(&cde_ctx->ctx_deleter_work); | 107 | cancel_delayed_work_sync(&cde_ctx->ctx_deleter_work); |
108 | mutex_lock(&cde_app->mutex); | ||
109 | } else { | ||
110 | cancel_delayed_work(&cde_ctx->ctx_deleter_work); | ||
109 | } | 111 | } |
110 | |||
111 | mutex_lock(&cde_app->mutex); | ||
112 | } | 112 | } |
113 | 113 | ||
114 | static void gk20a_cde_remove_contexts(struct gk20a *g) | 114 | static void gk20a_cde_remove_contexts(struct gk20a *g) |
@@ -123,13 +123,13 @@ __must_hold(&cde_app->mutex) | |||
123 | 123 | ||
124 | list_for_each_entry_safe(cde_ctx, cde_ctx_save, | 124 | list_for_each_entry_safe(cde_ctx, cde_ctx_save, |
125 | &cde_app->free_contexts, list) { | 125 | &cde_app->free_contexts, list) { |
126 | gk20a_cde_cancel_deleter(cde_ctx); | 126 | gk20a_cde_cancel_deleter(cde_ctx, true); |
127 | gk20a_cde_remove_ctx(cde_ctx); | 127 | gk20a_cde_remove_ctx(cde_ctx); |
128 | } | 128 | } |
129 | 129 | ||
130 | list_for_each_entry_safe(cde_ctx, cde_ctx_save, | 130 | list_for_each_entry_safe(cde_ctx, cde_ctx_save, |
131 | &cde_app->used_contexts, list) { | 131 | &cde_app->used_contexts, list) { |
132 | gk20a_cde_cancel_deleter(cde_ctx); | 132 | gk20a_cde_cancel_deleter(cde_ctx, true); |
133 | gk20a_cde_remove_ctx(cde_ctx); | 133 | gk20a_cde_remove_ctx(cde_ctx); |
134 | } | 134 | } |
135 | } | 135 | } |
@@ -173,14 +173,12 @@ __releases(&cde_app->mutex) | |||
173 | 173 | ||
174 | list_for_each_entry_safe(cde_ctx, cde_ctx_save, | 174 | list_for_each_entry_safe(cde_ctx, cde_ctx_save, |
175 | &cde_app->free_contexts, list) { | 175 | &cde_app->free_contexts, list) { |
176 | if (cde_ctx->is_temporary) | 176 | gk20a_cde_cancel_deleter(cde_ctx, false); |
177 | cancel_delayed_work(&cde_ctx->ctx_deleter_work); | ||
178 | } | 177 | } |
179 | 178 | ||
180 | list_for_each_entry_safe(cde_ctx, cde_ctx_save, | 179 | list_for_each_entry_safe(cde_ctx, cde_ctx_save, |
181 | &cde_app->used_contexts, list) { | 180 | &cde_app->used_contexts, list) { |
182 | if (cde_ctx->is_temporary) | 181 | gk20a_cde_cancel_deleter(cde_ctx, false); |
183 | cancel_delayed_work(&cde_ctx->ctx_deleter_work); | ||
184 | } | 182 | } |
185 | 183 | ||
186 | mutex_unlock(&cde_app->mutex); | 184 | mutex_unlock(&cde_app->mutex); |
@@ -720,7 +718,7 @@ static int gk20a_cde_execute_buffer(struct gk20a_cde_ctx *cde_ctx, | |||
720 | num_entries, flags, fence, fence_out); | 718 | num_entries, flags, fence, fence_out); |
721 | } | 719 | } |
722 | 720 | ||
723 | static void gk20a_ctx_release(struct gk20a_cde_ctx *cde_ctx) | 721 | static void gk20a_cde_ctx_release(struct gk20a_cde_ctx *cde_ctx) |
724 | __acquires(&cde_app->mutex) | 722 | __acquires(&cde_app->mutex) |
725 | __releases(&cde_app->mutex) | 723 | __releases(&cde_app->mutex) |
726 | { | 724 | { |
@@ -755,16 +753,15 @@ __releases(&cde_app->mutex) | |||
755 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, | 753 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, |
756 | "cde: attempting to delete temporary %p", cde_ctx); | 754 | "cde: attempting to delete temporary %p", cde_ctx); |
757 | 755 | ||
758 | /* this should fail only when shutting down the whole device */ | ||
759 | err = gk20a_busy(pdev); | 756 | err = gk20a_busy(pdev); |
760 | if (WARN(err, "gk20a cde: cannot set gk20a on, not freeing channel yet." | 757 | if (err) { |
761 | " rescheduling...")) { | 758 | /* this context would find new use anyway later, so not freeing |
762 | schedule_delayed_work(&cde_ctx->ctx_deleter_work, | 759 | * here does not leak anything */ |
763 | msecs_to_jiffies(CTX_DELETE_TIME)); | 760 | gk20a_warn(&pdev->dev, "cde: cannot set gk20a on, postponing" |
761 | " temp ctx deletion"); | ||
764 | return; | 762 | return; |
765 | } | 763 | } |
766 | 764 | ||
767 | /* mark so that nobody else assumes it's free to take */ | ||
768 | mutex_lock(&cde_app->mutex); | 765 | mutex_lock(&cde_app->mutex); |
769 | if (cde_ctx->in_use || !cde_app->initialised) { | 766 | if (cde_ctx->in_use || !cde_app->initialised) { |
770 | gk20a_dbg(gpu_dbg_cde_ctx, | 767 | gk20a_dbg(gpu_dbg_cde_ctx, |
@@ -809,7 +806,7 @@ __must_hold(&cde_app->mutex) | |||
809 | 806 | ||
810 | /* cancel any deletions now that ctx is in use */ | 807 | /* cancel any deletions now that ctx is in use */ |
811 | if (delayed_work_pending(&cde_ctx->ctx_deleter_work)) | 808 | if (delayed_work_pending(&cde_ctx->ctx_deleter_work)) |
812 | gk20a_cde_cancel_deleter(cde_ctx); | 809 | gk20a_cde_cancel_deleter(cde_ctx, false); |
813 | return cde_ctx; | 810 | return cde_ctx; |
814 | } | 811 | } |
815 | 812 | ||
@@ -1035,7 +1032,7 @@ __releases(&cde_app->mutex) | |||
1035 | mutex_unlock(&cde_app->mutex); | 1032 | mutex_unlock(&cde_app->mutex); |
1036 | } | 1033 | } |
1037 | } else { | 1034 | } else { |
1038 | gk20a_ctx_release(cde_ctx); | 1035 | gk20a_cde_ctx_release(cde_ctx); |
1039 | } | 1036 | } |
1040 | 1037 | ||
1041 | /* delete temporary contexts later */ | 1038 | /* delete temporary contexts later */ |