author	Terje Bergstrom <tbergstrom@nvidia.com>	2016-10-14 11:57:05 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2016-11-07 18:47:49 -0500
commit	5855fe26cb401d6d139b930ab48bb1106301585f (patch)
tree	4aef6673f773db8730abfb12718dc11b1e0a27f4 /drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parent	5f1c2bc27fb9dd66ed046b0590afc365be5011bf (diff)
gpu: nvgpu: Do not post events to unbound channels
Change-Id: Ia1157198aad248e12e94823eb9f273497c724b2c
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1248366
Tested-by: Sachit Kadle <skadle@nvidia.com>
Reviewed-by: David Martinez Nieto <dmartineznie@nvidia.com>
GVS: Gerrit_Virtual_Submit
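This change closes a race between channel bind/unbind and gk20a_channel_semaphore_wakeup(): the wakeup path woke semaphore waiters and posted BLOCKING_SYNC events on every channel it could take a reference on, including channels that were never bound or had already been unbound. The fix has three parts, each sketched after its hunk below: the bound flag becomes an atomic_t that is published only after the hardware bind writes complete (behind a wmb()), unbind consumes the flag exactly once via atomic_cmpxchg(), and the wakeup loop skips any channel whose flag reads false.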
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	61
1 file changed, 34 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 6c4b949a..16363cf1 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -361,7 +361,6 @@ static void channel_gk20a_bind(struct channel_gk20a *c)
 	gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
 		c->hw_chid, inst_ptr);
 
-	c->bound = true;
 
 	gk20a_writel(g, ccsr_channel_r(c->hw_chid),
 		(gk20a_readl(g, ccsr_channel_r(c->hw_chid)) &
@@ -379,6 +378,10 @@ static void channel_gk20a_bind(struct channel_gk20a *c)
 		(gk20a_readl(g, ccsr_channel_r(c->hw_chid)) &
 		 ~ccsr_channel_enable_set_f(~0)) |
 		 ccsr_channel_enable_set_true_f());
+
+	wmb();
+	atomic_set(&c->bound, true);
+
 }
 
 void channel_gk20a_unbind(struct channel_gk20a *ch_gk20a)
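The ordering in the bind hunk above is the usual publish pattern: wmb() guarantees the ccsr_channel_r() writes that actually bind the channel are visible before any observer can see bound as true, so a wakeup that reads the flag as set finds a fully bound channel. A minimal kernel-style sketch of the idiom, assuming hypothetical hw_setup()/publish_ready() names standing in for the driver's register writes:

#include <linux/atomic.h>

static atomic_t ready = ATOMIC_INIT(0);

static void hw_setup(void)
{
	/* ... device writes that must be visible first ... */
}

static void publish_ready(void)
{
	hw_setup();
	wmb();			/* order the setup writes ...            */
	atomic_set(&ready, 1);	/* ... before the flag becomes observable */
}

static bool is_ready(void)
{
	/* Observers act only once the flag is visibly set. */
	return atomic_read(&ready) != 0;
}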
@@ -387,12 +390,12 @@ void channel_gk20a_unbind(struct channel_gk20a *ch_gk20a)
 
 	gk20a_dbg_fn("");
 
-	if (ch_gk20a->bound)
+
+	if (atomic_cmpxchg(&ch_gk20a->bound, true, false)) {
 		gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->hw_chid),
 			ccsr_channel_inst_ptr_f(0) |
 			ccsr_channel_inst_bind_false_f());
-
-	ch_gk20a->bound = false;
+	}
 }
 
 int channel_gk20a_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
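atomic_cmpxchg() returns the value the variable held before the exchange, so the unbind body now runs only for the caller that actually flips bound from true to false; a repeated unbind, or an unbind of a never-bound channel, degrades to a no-op instead of writing ccsr_channel_inst_r() for a channel that was never wired up. A kernel-style sketch of this consume-once idiom, with hypothetical names (bound, do_teardown, teardown_once):

#include <linux/atomic.h>

static atomic_t bound = ATOMIC_INIT(1);

static void do_teardown(void)
{
	/* ... the register write that actually unbinds ... */
}

static void teardown_once(void)
{
	/*
	 * atomic_cmpxchg() returns the old value, so only the
	 * caller that wins the true -> false transition tears down.
	 */
	if (atomic_cmpxchg(&bound, 1, 0) == 1)
		do_teardown();
}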
@@ -2799,7 +2802,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 	struct channel_gk20a *c = g->fifo.channel+chid;
 	c->g = NULL;
 	c->hw_chid = chid;
-	c->bound = false;
+	atomic_set(&c->bound, false);
 	spin_lock_init(&c->ref_obtain_lock);
 	atomic_set(&c->ref_count, 0);
 	c->referenceable = false;
@@ -3328,30 +3331,34 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *c = g->fifo.channel+chid;
 		if (gk20a_channel_get(c)) {
-			wake_up_interruptible_all(&c->semaphore_wq);
-			if (post_events) {
-				if (gk20a_is_channel_marked_as_tsg(c)) {
-					struct tsg_gk20a *tsg =
-						&g->fifo.tsg[c->tsgid];
-
-					gk20a_tsg_event_id_post_event(tsg,
-						NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC);
-				} else {
-					gk20a_channel_event_id_post_event(c,
-						NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC);
-				}
+			if (atomic_read(&c->bound)) {
+				wake_up_interruptible_all(&c->semaphore_wq);
+				if (post_events) {
+					if (gk20a_is_channel_marked_as_tsg(c)) {
+						struct tsg_gk20a *tsg =
+							&g->fifo.tsg[c->tsgid];
+
+						gk20a_tsg_event_id_post_event(tsg,
+							NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC);
+					} else {
+						gk20a_channel_event_id_post_event(c,
+							NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC);
+					}
+				}
+				/*
+				 * Only non-deterministic channels get the
+				 * channel_update callback. We don't allow
+				 * semaphore-backed syncs for these channels
+				 * anyways, since they have a dependency on
+				 * the sync framework.
+				 * If deterministic channels are receiving a
+				 * semaphore wakeup, it must be for a
+				 * user-space managed
+				 * semaphore.
+				 */
+				if (!c->deterministic)
+					gk20a_channel_update(c, 0);
 			}
-			/*
-			 * Only non-deterministic channels get the
-			 * channel_update callback. We don't allow
-			 * semaphore-backed syncs for these channels anyways,
-			 * since they have a dependency on the sync framework.
-			 * If deterministic channels are receiving a semaphore
-			 * wakeup, it must be for a user-space managed
-			 * semaphore.
-			 */
-			if (!c->deterministic)
-				gk20a_channel_update(c, 0);
 			gk20a_channel_put(c);
 		}
 	}
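With the atomic_read() guard in the wakeup hunk, wakeups, BLOCKING_SYNC events, and the channel_update callback are all skipped for unbound channels, which is exactly what the subject line asks for. Note the flag is a snapshot, not a lock: a channel could still be unbound just after the read, so the guard filters out stale channels while the surrounding gk20a_channel_get()/gk20a_channel_put() pair keeps the channel structure itself valid across the loop body.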