path: root/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
author	Debarshi Dutta <ddutta@nvidia.com>	2017-08-03 06:04:44 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-08-17 17:26:47 -0400
commit	98186ec2c2127c2af65a34f9e697e04f518a79ab (patch)
tree	08ad87f3bf8c739e96b36f01728a8f7a30749a0e /drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parent	49dc335cfe588179cbb42d8bab53bc76ba88b28f (diff)
gpu: nvgpu: Add wrapper over atomic_t and atomic64_t
- added wrapper structs nvgpu_atomic_t and nvgpu_atomic64_t over atomic_t and atomic64_t
- added nvgpu_atomic_* and nvgpu_atomic64_* APIs to access the above wrappers.

JIRA NVGPU-121

Change-Id: I61667bb0a84c2fc475365abb79bffb42b8b4786a
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1533044
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
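For context, a minimal sketch of what such a Linux-backed wrapper layer could look like is shown below, limited to the nvgpu_atomic_* operations that appear in this file. The nvgpu_atomic_t type and the API names come from the commit message; the struct layout and the static-inline forwarding are assumptions for illustration, not the actual contents of the nvgpu atomic header introduced by this change.

/* Illustrative sketch only: a plain wrapper struct around the Linux
 * atomic_t, with inline accessors forwarding to the kernel atomic ops.
 * The real nvgpu header may differ in layout and in which operations
 * it provides. */
#include <linux/atomic.h>

typedef struct nvgpu_atomic {
	atomic_t atomic_var;
} nvgpu_atomic_t;

static inline void nvgpu_atomic_set(nvgpu_atomic_t *v, int i)
{
	atomic_set(&v->atomic_var, i);
}

static inline int nvgpu_atomic_read(nvgpu_atomic_t *v)
{
	return atomic_read(&v->atomic_var);
}

static inline void nvgpu_atomic_inc(nvgpu_atomic_t *v)
{
	atomic_inc(&v->atomic_var);
}

static inline int nvgpu_atomic_inc_return(nvgpu_atomic_t *v)
{
	return atomic_inc_return(&v->atomic_var);
}

static inline void nvgpu_atomic_dec(nvgpu_atomic_t *v)
{
	atomic_dec(&v->atomic_var);
}

static inline int nvgpu_atomic_dec_and_test(nvgpu_atomic_t *v)
{
	return atomic_dec_and_test(&v->atomic_var);
}

With a wrapper of this shape, call sites convert mechanically, e.g. atomic_read(&ch->ref_count) becomes nvgpu_atomic_read(&ch->ref_count), which is exactly the pattern applied throughout channel_gk20a.c in the diff below.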
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	39
1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 62b312b2..d96872f3 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -100,7 +100,7 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 		ch = nvgpu_list_first_entry(&f->free_chs, channel_gk20a,
 				free_chs);
 		nvgpu_list_del(&ch->free_chs);
-		WARN_ON(atomic_read(&ch->ref_count));
+		WARN_ON(nvgpu_atomic_read(&ch->ref_count));
 		WARN_ON(ch->referenceable);
 		f->used_channels++;
 	}
@@ -394,20 +394,20 @@ void gk20a_set_error_notifier(struct channel_gk20a *ch, __u32 error)
 }
 
 static void gk20a_wait_until_counter_is_N(
-	struct channel_gk20a *ch, atomic_t *counter, int wait_value,
+	struct channel_gk20a *ch, nvgpu_atomic_t *counter, int wait_value,
 	struct nvgpu_cond *c, const char *caller, const char *counter_name)
 {
 	while (true) {
 		if (NVGPU_COND_WAIT(
 			c,
-			atomic_read(counter) == wait_value,
+			nvgpu_atomic_read(counter) == wait_value,
 			5000) == 0)
 			break;
 
 		nvgpu_warn(ch->g,
 			"%s: channel %d, still waiting, %s left: %d, waiting for: %d",
 			caller, ch->chid, counter_name,
-			atomic_read(counter), wait_value);
+			nvgpu_atomic_read(counter), wait_value);
 
 		gk20a_channel_dump_ref_actions(ch);
 	}
@@ -491,7 +491,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	nvgpu_spinlock_release(&ch->ref_obtain_lock);
 
 	/* matches with the initial reference in gk20a_open_new_channel() */
-	atomic_dec(&ch->ref_count);
+	nvgpu_atomic_dec(&ch->ref_count);
 
 	/* wait until no more refs to the channel */
 	if (!force)
@@ -635,7 +635,7 @@ static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch)
 	nvgpu_spinlock_acquire(&ch->ref_actions_lock);
 
 	dev_info(dev, "ch %d: refs %d. Actions, most recent last:\n",
-		ch->chid, atomic_read(&ch->ref_count));
+		ch->chid, nvgpu_atomic_read(&ch->ref_count));
 
 	/* start at the oldest possible entry. put is next insertion point */
 	get = ch->ref_actions_put;
@@ -709,7 +709,7 @@ struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
 
 	if (likely(ch->referenceable)) {
 		gk20a_channel_save_ref_source(ch, channel_gk20a_ref_action_get);
-		atomic_inc(&ch->ref_count);
+		nvgpu_atomic_inc(&ch->ref_count);
 		ret = ch;
 	} else
 		ret = NULL;
@@ -726,17 +726,17 @@ void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller)
 {
 	gk20a_channel_save_ref_source(ch, channel_gk20a_ref_action_put);
 	trace_gk20a_channel_put(ch->chid, caller);
-	atomic_dec(&ch->ref_count);
+	nvgpu_atomic_dec(&ch->ref_count);
 	nvgpu_cond_broadcast(&ch->ref_count_dec_wq);
 
 	/* More puts than gets. Channel is probably going to get
 	 * stuck. */
-	WARN_ON(atomic_read(&ch->ref_count) < 0);
+	WARN_ON(nvgpu_atomic_read(&ch->ref_count) < 0);
 
 	/* Also, more puts than gets. ref_count can go to 0 only if
 	 * the channel is closing. Channel is probably going to get
 	 * stuck. */
-	WARN_ON(atomic_read(&ch->ref_count) == 0 && ch->referenceable);
+	WARN_ON(nvgpu_atomic_read(&ch->ref_count) == 0 && ch->referenceable);
 }
 
 void gk20a_channel_close(struct channel_gk20a *ch)
@@ -879,7 +879,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 	 * references. The initial reference will be decreased in
 	 * gk20a_free_channel() */
 	ch->referenceable = true;
-	atomic_set(&ch->ref_count, 1);
+	nvgpu_atomic_set(&ch->ref_count, 1);
 	wmb();
 
 	return ch;
@@ -1745,7 +1745,7 @@ static int __gk20a_channel_worker_wakeup(struct gk20a *g)
 	 * pair.
 	 */
 
-	put = atomic_inc_return(&g->channel_worker.put);
+	put = nvgpu_atomic_inc_return(&g->channel_worker.put);
 	nvgpu_cond_signal(&g->channel_worker.wq);
 
 	return put;
@@ -1761,7 +1761,7 @@ static int __gk20a_channel_worker_wakeup(struct gk20a *g)
  */
 static bool __gk20a_channel_worker_pending(struct gk20a *g, int get)
 {
-	bool pending = atomic_read(&g->channel_worker.put) != get;
+	bool pending = nvgpu_atomic_read(&g->channel_worker.put) != get;
 
 	/*
 	 * This would be the place for a rmb() pairing a wmb() for a wakeup
@@ -1864,7 +1864,7 @@ int nvgpu_channel_worker_init(struct gk20a *g)
 	int err;
 	char thread_name[64];
 
-	atomic_set(&g->channel_worker.put, 0);
+	nvgpu_atomic_set(&g->channel_worker.put, 0);
 	nvgpu_cond_init(&g->channel_worker.wq);
 	nvgpu_init_list_node(&g->channel_worker.items);
 	nvgpu_spinlock_init(&g->channel_worker.items_lock);
@@ -2086,7 +2086,8 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 
 	if (g->aggressive_sync_destroy_thresh) {
 		nvgpu_mutex_acquire(&c->sync_lock);
-		if (atomic_dec_and_test(&c->sync->refcount) &&
+		if (nvgpu_atomic_dec_and_test(
+				&c->sync->refcount) &&
 				g->aggressive_sync_destroy) {
 			gk20a_channel_sync_destroy(c->sync);
 			c->sync = NULL;
@@ -2321,7 +2322,7 @@ static int gk20a_submit_prepare_syncs(struct channel_gk20a *c,
 			}
 			new_sync_created = true;
 		}
-		atomic_inc(&c->sync->refcount);
+		nvgpu_atomic_inc(&c->sync->refcount);
 		nvgpu_mutex_release(&c->sync_lock);
 	}
 
@@ -2774,9 +2775,9 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 
 	c->g = NULL;
 	c->chid = chid;
-	atomic_set(&c->bound, false);
+	nvgpu_atomic_set(&c->bound, false);
 	nvgpu_spinlock_init(&c->ref_obtain_lock);
-	atomic_set(&c->ref_count, 0);
+	nvgpu_atomic_set(&c->ref_count, 0);
 	c->referenceable = false;
 	nvgpu_cond_init(&c->ref_count_dec_wq);
@@ -2935,7 +2936,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *c = g->fifo.channel+chid;
 		if (gk20a_channel_get(c)) {
-			if (atomic_read(&c->bound)) {
+			if (nvgpu_atomic_read(&c->bound)) {
 				nvgpu_cond_broadcast_interruptible(
 						&c->semaphore_wq);
 				if (post_events) {