author     Konsta Holtta <kholtta@nvidia.com>        2015-03-06 09:33:43 -0500
committer  Terje Bergstrom <tbergstrom@nvidia.com>   2015-06-09 14:13:43 -0400
commit     6085c90f499c642bc41a646b0efbdfe60e096c74 (patch)
tree       0eaab99b228ce162ec3a44d0f8138b441f5a64f4 /drivers/gpu/nvgpu/gk20a/channel_gk20a.h
parent     a41e5c41cadaa3d030a1f75b09328b8b1a440b69 (diff)
gpu: nvgpu: add per-channel refcounting
Add reference counting for channels, and wait for the reference count to reach zero in gk20a_channel_free() before actually freeing the channel. Also, change free-channel tracking a bit by employing a list of free channels, which simplifies the procedure of finding available channels with reference counting.

Each use of a channel must have a reference taken before use, or held by the caller. Taking a reference on a wild channel pointer may fail if the channel is either not opened or in the process of being closed.

Also, add safeguards for protecting against accidental use of closed channels; specifically, set ch->g = NULL in channel free. This makes it obvious if a freed channel is attempted to be used.

The last user of a channel might be the deferred interrupt handler, so wait for deferred interrupts to be processed twice in the channel free procedure: once to provide the last notifications to the channel, and once to make sure there are no stale pointers left after references to the channel have been denied.

Finally, fix some races in the channel and TSG force-reset IOCTL paths by pausing the channel scheduler in gk20a_fifo_recover_ch() and gk20a_fifo_recover_tsg() while the affected engines are identified, the appropriate MMU faults triggered, and the MMU faults handled. In this case, make sure that MMU fault handling does not attempt to query the hardware about the failing channel or TSG ids. This should make channel recovery safer also in the regular (i.e., not in the interrupt handler) context.

Bug 1530226
Bug 1597493
Bug 1625901
Bug 200076344
Bug 200071810

Change-Id: Ib274876908e18219c64ea41e50ca443df81d957b
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Signed-off-by: Sami Kiminki <skiminki@nvidia.com>
Reviewed-on: http://git-master/r/448463
(cherry picked from commit 3f03aeae64ef2af4829e06f5f63062e8ebd21353)
Reviewed-on: http://git-master/r/755147
Reviewed-by: Automatic_Commit_Validation_User
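The close/free procedure described above can be illustrated with a short sketch. This is not the actual gk20a implementation (which lives in channel_gk20a.c and is not part of this diff); the function name is hypothetical, and only the struct members added by this patch and standard kernel primitives are used:

/*
 * Illustrative sketch only: deny new references, wait for outstanding
 * ones to be dropped, then make later accidental use obvious.
 */
static void sketch_channel_drain_refs(struct channel_gk20a *ch)
{
        /* stop handing out new references */
        spin_lock(&ch->ref_obtain_lock);
        ch->referenceable = false;
        spin_unlock(&ch->ref_obtain_lock);

        /* wait until every outstanding reference has been put */
        wait_event(ch->ref_count_dec_wq,
                   atomic_read(&ch->ref_count) == 0);

        /* a freed channel must not look usable any more */
        ch->g = NULL;
}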
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.h')
-rw-r--r--   drivers/gpu/nvgpu/gk20a/channel_gk20a.h   32
1 file changed, 26 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index f022fe36..2ea5b4be 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -19,12 +19,13 @@
 #define CHANNEL_GK20A_H
 
 #include <linux/log2.h>
-#include <linux/slab.h>
-#include <linux/wait.h>
 #include <linux/mutex.h>
-#include <uapi/linux/nvgpu.h>
 #include <linux/poll.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <uapi/linux/nvgpu.h>
 
 struct gk20a;
 struct gr_gk20a;
@@ -77,8 +78,15 @@ struct channel_gk20a_poll_events {
 
 /* this is the priv element of struct nvhost_channel */
 struct channel_gk20a {
-	struct gk20a *g;
-	bool in_use;
+	struct gk20a *g; /* set only when channel is active */
+
+	struct list_head free_chs;
+
+	spinlock_t ref_obtain_lock;
+	bool referenceable;
+	atomic_t ref_count;
+	wait_queue_head_t ref_count_dec_wq;
+
 	int hw_chid;
 	bool bound;
 	bool first_init;
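The new members are only declared in this hunk; the code that uses them is in channel_gk20a.c and not shown here. As a hedged illustration of what the fields are for (the two sketch function names are hypothetical), a reference could be obtained and released roughly like this:

/* Sketch: hand out a reference only while the channel is referenceable. */
static struct channel_gk20a *sketch_channel_get(struct channel_gk20a *ch)
{
        struct channel_gk20a *ret = NULL;

        spin_lock(&ch->ref_obtain_lock);
        if (ch->referenceable) {
                atomic_inc(&ch->ref_count);
                ret = ch;
        }
        spin_unlock(&ch->ref_obtain_lock);

        return ret; /* NULL: channel is not open or is being closed */
}

/* Sketch: drop a reference and wake anyone waiting in the free path. */
static void sketch_channel_put(struct channel_gk20a *ch)
{
        atomic_dec(&ch->ref_count);
        wake_up(&ch->ref_count_dec_wq);
}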
@@ -171,7 +179,10 @@ static inline bool gk20a_channel_as_bound(struct channel_gk20a *ch)
 }
 int channel_gk20a_commit_va(struct channel_gk20a *c);
 int gk20a_init_channel_support(struct gk20a *, u32 chid);
-void gk20a_free_channel(struct channel_gk20a *ch, bool finish);
+
+/* must be inside gk20a_busy()..gk20a_idle() */
+void gk20a_channel_close(struct channel_gk20a *ch);
+
 bool gk20a_channel_update_and_check_timeout(struct channel_gk20a *ch,
 		u32 timeout_delta_ms);
 void gk20a_disable_channel(struct channel_gk20a *ch,
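gk20a_free_channel(ch, finish) is replaced here by gk20a_channel_close(ch), which, per the new comment, must run between gk20a_busy() and gk20a_idle(). A minimal caller sketch follows; the surrounding release function and the exact gk20a_busy()/gk20a_idle() arguments (g->dev) are assumptions, not part of this diff:

/* Sketch: keep the GPU powered while the channel is torn down. */
static int sketch_release_channel(struct gk20a *g, struct channel_gk20a *ch)
{
        int err;

        err = gk20a_busy(g->dev);       /* assumed signature */
        if (err)
                return err;

        gk20a_channel_close(ch);        /* replaces gk20a_free_channel() */

        gk20a_idle(g->dev);
        return 0;
}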
@@ -202,6 +213,15 @@ void gk20a_channel_event(struct channel_gk20a *ch);
 
 void gk20a_init_channel(struct gpu_ops *gops);
 
+/* returns ch if reference was obtained */
+struct channel_gk20a *__must_check _gk20a_channel_get(struct channel_gk20a *ch,
+						      const char *caller);
+#define gk20a_channel_get(ch) _gk20a_channel_get(ch, __func__)
+
+
+void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller);
+#define gk20a_channel_put(ch) _gk20a_channel_put(ch, __func__)
+
 int gk20a_wait_channel_idle(struct channel_gk20a *ch);
 struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g);
 struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g,
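The _get/_put pair is wrapped in macros so the caller's __func__ is recorded for debugging, and _gk20a_channel_get() is marked __must_check because it returns NULL when the reference cannot be taken. A hedged caller-side sketch (the surrounding worker function is hypothetical; only the macros declared above are real):

/* Sketch: a deferred handler that may race with channel close. */
static void sketch_handle_channel_event(struct channel_gk20a *ch)
{
        /* may fail if the channel was never opened or is being closed */
        ch = gk20a_channel_get(ch);
        if (!ch)
                return;

        /* ch and ch->g are safe to use while the reference is held */

        gk20a_channel_put(ch);
}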