author     Debarshi Dutta <ddutta@nvidia.com>  2017-08-03 06:04:44 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-08-17 17:26:47 -0400
commit     98186ec2c2127c2af65a34f9e697e04f518a79ab (patch)
tree       08ad87f3bf8c739e96b36f01728a8f7a30749a0e /drivers/gpu/nvgpu/gk20a
parent     49dc335cfe588179cbb42d8bab53bc76ba88b28f (diff)
gpu: nvgpu: Add wrapper over atomic_t and atomic64_t
- added wrapper structs nvgpu_atomic_t and nvgpu_atomic64_t over
  atomic_t and atomic64_t
- added nvgpu_atomic_* and nvgpu_atomic64_* APIs to access the above
  wrappers.

JIRA NVGPU-121

Change-Id: I61667bb0a84c2fc475365abb79bffb42b8b4786a
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1533044
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
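The wrapper header itself lives in the common nvgpu include tree rather than under gk20a/, so it is not part of this diffstat. A minimal sketch of the Linux flavor it implies, inferred from the call sites below — the struct and API names come from the commit message, while the atomic_var member name and exact layout are assumptions:

    /* Hypothetical sketch of <nvgpu/atomic.h> on Linux; the atomic_var
     * member name is an assumption, not taken from this diff. */
    typedef struct nvgpu_atomic {
            atomic_t atomic_var;
    } nvgpu_atomic_t;

    typedef struct nvgpu_atomic64 {
            atomic64_t atomic_var;
    } nvgpu_atomic64_t;

    #define NVGPU_ATOMIC_INIT(i)    { ATOMIC_INIT(i) }

    static inline void nvgpu_atomic_set(nvgpu_atomic_t *v, int i)
    {
            atomic_set(&v->atomic_var, i);
    }

    static inline int nvgpu_atomic_read(nvgpu_atomic_t *v)
    {
            return atomic_read(&v->atomic_var);
    }

    static inline int nvgpu_atomic_inc_return(nvgpu_atomic_t *v)
    {
            return atomic_inc_return(&v->atomic_var);
    }

    static inline void nvgpu_atomic64_set(nvgpu_atomic64_t *v, long i)
    {
            atomic64_set(&v->atomic_var, i);
    }

The remaining nvgpu_atomic_* calls seen in the diff (inc, dec, add_return, dec_and_test, cmpxchg, sub_return) would presumably wrap the corresponding atomic_* primitives the same way. The indirection buys nothing on Linux itself; its apparent value is that non-Linux builds of nvgpu can back the same nvgpu_atomic_* names with a different implementation.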
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c        39
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.h         5
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c    6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h    2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c    14
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c         5
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c            6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.h            2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.c                 4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h                 5
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c              6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h              3
12 files changed, 51 insertions, 46 deletions
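Across all twelve files the conversion is mechanical: each atomic_t or atomic64_t field becomes nvgpu_atomic_t or nvgpu_atomic64_t, and each atomic_*() call becomes the matching nvgpu_atomic_*() call. For example, from dbg_gpu_gk20a.c below:

    /* before */
    static atomic_t unique_id = ATOMIC_INIT(0);
    static int generate_unique_id(void)
    {
            return atomic_add_return(1, &unique_id);
    }

    /* after */
    static nvgpu_atomic_t unique_id = NVGPU_ATOMIC_INIT(0);
    static int generate_unique_id(void)
    {
            return nvgpu_atomic_add_return(1, &unique_id);
    }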
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 62b312b2..d96872f3 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -100,7 +100,7 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
                 ch = nvgpu_list_first_entry(&f->free_chs, channel_gk20a,
                                 free_chs);
                 nvgpu_list_del(&ch->free_chs);
-                WARN_ON(atomic_read(&ch->ref_count));
+                WARN_ON(nvgpu_atomic_read(&ch->ref_count));
                 WARN_ON(ch->referenceable);
                 f->used_channels++;
         }
@@ -394,20 +394,20 @@ void gk20a_set_error_notifier(struct channel_gk20a *ch, __u32 error)
 }
 
 static void gk20a_wait_until_counter_is_N(
-        struct channel_gk20a *ch, atomic_t *counter, int wait_value,
+        struct channel_gk20a *ch, nvgpu_atomic_t *counter, int wait_value,
         struct nvgpu_cond *c, const char *caller, const char *counter_name)
 {
         while (true) {
                 if (NVGPU_COND_WAIT(
                             c,
-                            atomic_read(counter) == wait_value,
+                            nvgpu_atomic_read(counter) == wait_value,
                             5000) == 0)
                         break;
 
                 nvgpu_warn(ch->g,
                            "%s: channel %d, still waiting, %s left: %d, waiting for: %d",
                            caller, ch->chid, counter_name,
-                           atomic_read(counter), wait_value);
+                           nvgpu_atomic_read(counter), wait_value);
 
                 gk20a_channel_dump_ref_actions(ch);
         }
@@ -491,7 +491,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
         nvgpu_spinlock_release(&ch->ref_obtain_lock);
 
         /* matches with the initial reference in gk20a_open_new_channel() */
-        atomic_dec(&ch->ref_count);
+        nvgpu_atomic_dec(&ch->ref_count);
 
         /* wait until no more refs to the channel */
         if (!force)
@@ -635,7 +635,7 @@ static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch)
         nvgpu_spinlock_acquire(&ch->ref_actions_lock);
 
         dev_info(dev, "ch %d: refs %d. Actions, most recent last:\n",
-                ch->chid, atomic_read(&ch->ref_count));
+                ch->chid, nvgpu_atomic_read(&ch->ref_count));
 
         /* start at the oldest possible entry. put is next insertion point */
         get = ch->ref_actions_put;
@@ -709,7 +709,7 @@ struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
 
         if (likely(ch->referenceable)) {
                 gk20a_channel_save_ref_source(ch, channel_gk20a_ref_action_get);
-                atomic_inc(&ch->ref_count);
+                nvgpu_atomic_inc(&ch->ref_count);
                 ret = ch;
         } else
                 ret = NULL;
@@ -726,17 +726,17 @@ void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller)
 {
         gk20a_channel_save_ref_source(ch, channel_gk20a_ref_action_put);
         trace_gk20a_channel_put(ch->chid, caller);
-        atomic_dec(&ch->ref_count);
+        nvgpu_atomic_dec(&ch->ref_count);
         nvgpu_cond_broadcast(&ch->ref_count_dec_wq);
 
         /* More puts than gets. Channel is probably going to get
          * stuck. */
-        WARN_ON(atomic_read(&ch->ref_count) < 0);
+        WARN_ON(nvgpu_atomic_read(&ch->ref_count) < 0);
 
         /* Also, more puts than gets. ref_count can go to 0 only if
          * the channel is closing. Channel is probably going to get
          * stuck. */
-        WARN_ON(atomic_read(&ch->ref_count) == 0 && ch->referenceable);
+        WARN_ON(nvgpu_atomic_read(&ch->ref_count) == 0 && ch->referenceable);
 }
 
 void gk20a_channel_close(struct channel_gk20a *ch)
@@ -879,7 +879,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
          * references. The initial reference will be decreased in
          * gk20a_free_channel() */
         ch->referenceable = true;
-        atomic_set(&ch->ref_count, 1);
+        nvgpu_atomic_set(&ch->ref_count, 1);
         wmb();
 
         return ch;
@@ -1745,7 +1745,7 @@ static int __gk20a_channel_worker_wakeup(struct gk20a *g)
          * pair.
          */
 
-        put = atomic_inc_return(&g->channel_worker.put);
+        put = nvgpu_atomic_inc_return(&g->channel_worker.put);
         nvgpu_cond_signal(&g->channel_worker.wq);
 
         return put;
@@ -1761,7 +1761,7 @@ static int __gk20a_channel_worker_wakeup(struct gk20a *g)
  */
 static bool __gk20a_channel_worker_pending(struct gk20a *g, int get)
 {
-        bool pending = atomic_read(&g->channel_worker.put) != get;
+        bool pending = nvgpu_atomic_read(&g->channel_worker.put) != get;
 
         /*
          * This would be the place for a rmb() pairing a wmb() for a wakeup
@@ -1864,7 +1864,7 @@ int nvgpu_channel_worker_init(struct gk20a *g)
         int err;
         char thread_name[64];
 
-        atomic_set(&g->channel_worker.put, 0);
+        nvgpu_atomic_set(&g->channel_worker.put, 0);
         nvgpu_cond_init(&g->channel_worker.wq);
         nvgpu_init_list_node(&g->channel_worker.items);
         nvgpu_spinlock_init(&g->channel_worker.items_lock);
@@ -2086,7 +2086,8 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 
         if (g->aggressive_sync_destroy_thresh) {
                 nvgpu_mutex_acquire(&c->sync_lock);
-                if (atomic_dec_and_test(&c->sync->refcount) &&
+                if (nvgpu_atomic_dec_and_test(
+                                        &c->sync->refcount) &&
                         g->aggressive_sync_destroy) {
                         gk20a_channel_sync_destroy(c->sync);
                         c->sync = NULL;
@@ -2321,7 +2322,7 @@ static int gk20a_submit_prepare_syncs(struct channel_gk20a *c,
                         }
                         new_sync_created = true;
                 }
-                atomic_inc(&c->sync->refcount);
+                nvgpu_atomic_inc(&c->sync->refcount);
                 nvgpu_mutex_release(&c->sync_lock);
         }
 
@@ -2774,9 +2775,9 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 
         c->g = NULL;
         c->chid = chid;
-        atomic_set(&c->bound, false);
+        nvgpu_atomic_set(&c->bound, false);
         nvgpu_spinlock_init(&c->ref_obtain_lock);
-        atomic_set(&c->ref_count, 0);
+        nvgpu_atomic_set(&c->ref_count, 0);
         c->referenceable = false;
         nvgpu_cond_init(&c->ref_count_dec_wq);
 
@@ -2935,7 +2936,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
         for (chid = 0; chid < f->num_channels; chid++) {
                 struct channel_gk20a *c = g->fifo.channel+chid;
                 if (gk20a_channel_get(c)) {
-                        if (atomic_read(&c->bound)) {
+                        if (nvgpu_atomic_read(&c->bound)) {
                                 nvgpu_cond_broadcast_interruptible(
                                                 &c->semaphore_wq);
                                 if (post_events) {
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index a9ccd93f..f022e630 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -24,6 +24,7 @@
 #include <nvgpu/lock.h>
 #include <nvgpu/timers.h>
 #include <nvgpu/cond.h>
+#include <nvgpu/atomic.h>
 
 struct gk20a;
 struct gr_gk20a;
@@ -173,7 +174,7 @@ struct channel_gk20a {
 
         struct nvgpu_spinlock ref_obtain_lock;
         bool referenceable;
-        atomic_t ref_count;
+        nvgpu_atomic_t ref_count;
         struct nvgpu_cond ref_count_dec_wq;
 #if GK20A_CHANNEL_REFCOUNT_TRACKING
         /*
@@ -191,7 +192,7 @@ struct channel_gk20a {
 
         int chid;
         bool wdt_enabled;
-        atomic_t bound;
+        nvgpu_atomic_t bound;
         bool first_init;
         bool vpr;
         bool deterministic;
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index c9c03d37..aa340ba6 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -346,7 +346,7 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c)
 
         nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost_dev, sp->id);
 
-        atomic_set(&sp->ops.refcount, 0);
+        nvgpu_atomic_set(&sp->ops.refcount, 0);
         sp->ops.wait_syncpt = gk20a_channel_syncpt_wait_syncpt;
         sp->ops.wait_fd = gk20a_channel_syncpt_wait_fd;
         sp->ops.incr = gk20a_channel_syncpt_incr;
@@ -619,7 +619,7 @@ static int __semaphore_wait_fd_fast_path(struct channel_gk20a *c,
                 return err;
 
         nvgpu_semaphore_get(sema);
-        BUG_ON(!atomic_read(&sema->value));
+        BUG_ON(!nvgpu_atomic_read(&sema->value));
         add_sema_cmd(c->g, c, sema, wait_cmd, 8, true, false);
 
         /*
@@ -922,7 +922,7 @@ gk20a_channel_semaphore_create(struct channel_gk20a *c)
                 return NULL;
         }
 #endif
-        atomic_set(&sema->ops.refcount, 0);
+        nvgpu_atomic_set(&sema->ops.refcount, 0);
         sema->ops.wait_syncpt = gk20a_channel_semaphore_wait_syncpt;
         sema->ops.wait_fd = gk20a_channel_semaphore_wait_fd;
         sema->ops.incr = gk20a_channel_semaphore_incr;
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
index 4efd1b76..9bdc5d12 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
@@ -25,7 +25,7 @@ struct gk20a_fence;
 struct gk20a;
 
 struct gk20a_channel_sync {
-        atomic_t refcount;
+        nvgpu_atomic_t refcount;
 
         /* Generate a gpu wait cmdbuf from syncpoint.
          * Returns
diff --git a/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
index cc05ceff..546917f1 100644
--- a/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
@@ -47,7 +47,7 @@ struct gk20a_ctxsw_dev {
         size_t size;
         u32 num_ents;
 
-        atomic_t vma_ref;
+        nvgpu_atomic_t vma_ref;
 
         struct nvgpu_mutex write_lock;
 };
@@ -152,7 +152,7 @@ static int gk20a_ctxsw_dev_alloc_buffer(struct gk20a_ctxsw_dev *dev,
         void *buf;
         int err;
 
-        if ((dev->write_enabled) || (atomic_read(&dev->vma_ref)))
+        if ((dev->write_enabled) || (nvgpu_atomic_read(&dev->vma_ref)))
                 return -EBUSY;
 
         err = g->ops.fecs_trace.alloc_user_buffer(g, &buf, &size);
@@ -438,18 +438,18 @@ static void gk20a_ctxsw_dev_vma_open(struct vm_area_struct *vma)
 {
         struct gk20a_ctxsw_dev *dev = vma->vm_private_data;
 
-        atomic_inc(&dev->vma_ref);
+        nvgpu_atomic_inc(&dev->vma_ref);
         gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d",
-                atomic_read(&dev->vma_ref));
+                nvgpu_atomic_read(&dev->vma_ref));
 }
 
 static void gk20a_ctxsw_dev_vma_close(struct vm_area_struct *vma)
 {
         struct gk20a_ctxsw_dev *dev = vma->vm_private_data;
 
-        atomic_dec(&dev->vma_ref);
+        nvgpu_atomic_dec(&dev->vma_ref);
         gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "vma_ref=%d",
-                atomic_read(&dev->vma_ref));
+                nvgpu_atomic_read(&dev->vma_ref));
 }
 
 static struct vm_operations_struct gk20a_ctxsw_dev_vma_ops = {
@@ -497,7 +497,7 @@ static int gk20a_ctxsw_init_devs(struct gk20a *g)
                 err = nvgpu_mutex_init(&dev->write_lock);
                 if (err)
                         return err;
-                atomic_set(&dev->vma_ref, 0);
+                nvgpu_atomic_set(&dev->vma_ref, 0);
                 dev++;
         }
         return 0;
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index 1572ff48..00050850 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -26,6 +26,7 @@
 #include <nvgpu/kmem.h>
 #include <nvgpu/log.h>
 #include <nvgpu/vm.h>
+#include <nvgpu/atomic.h>
 
 #include "gk20a.h"
 #include "gk20a/platform_gk20a.h"
@@ -74,10 +75,10 @@ nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s)
 }
 
 /* silly allocator - just increment id */
-static atomic_t unique_id = ATOMIC_INIT(0);
+static nvgpu_atomic_t unique_id = NVGPU_ATOMIC_INIT(0);
 static int generate_unique_id(void)
 {
-        return atomic_add_return(1, &unique_id);
+        return nvgpu_atomic_add_return(1, &unique_id);
 }
 
 static int alloc_session(struct gk20a *g, struct dbg_session_gk20a **_dbg_s)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index abd455d7..47e7d82e 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -3439,7 +3439,7 @@ void gk20a_dump_channel_status_ramfc(struct gk20a *g,
         gk20a_debug_output(o, "SEMA STATE: value: 0x%08x "
                            "next_val: 0x%08x addr: 0x%010llx\n",
                           __nvgpu_semaphore_read(hw_sema),
-                          atomic_read(&hw_sema->next_value),
+                          nvgpu_atomic_read(&hw_sema->next_value),
                           nvgpu_hw_sema_addr(hw_sema));
 
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
@@ -3489,7 +3489,7 @@ void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
                         continue;
 
                 ch_state[chid]->pid = ch->pid;
-                ch_state[chid]->refs = atomic_read(&ch->ref_count);
+                ch_state[chid]->refs = nvgpu_atomic_read(&ch->ref_count);
                 ch_state[chid]->deterministic = ch->deterministic;
                 nvgpu_mem_rd_n(g, &ch->inst_block, 0,
                                &ch_state[chid]->inst_block[0],
@@ -3591,7 +3591,7 @@ void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a)
 
         gk20a_dbg_fn("");
 
-        if (atomic_cmpxchg(&ch_gk20a->bound, true, false)) {
+        if (nvgpu_atomic_cmpxchg(&ch_gk20a->bound, true, false)) {
                 gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid),
                         ccsr_channel_inst_ptr_f(0) |
                         ccsr_channel_inst_bind_false_f());
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index b19a7b68..a6eae8ca 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -156,7 +156,7 @@ struct fifo_gk20a {
 #ifdef CONFIG_DEBUG_FS
         struct {
                 struct fifo_profile_gk20a *data;
-                atomic_t get;
+                nvgpu_atomic_t get;
                 bool enabled;
                 u64 *sorted;
                 struct kref ref;
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index c50d800f..550b22c0 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -373,13 +373,13 @@ int gk20a_wait_for_idle(struct gk20a *g)
         if (g->user_railgate_disabled)
                 target_usage_count = 1;
 
-        while ((atomic_read(&g->usage_count) != target_usage_count)
+        while ((nvgpu_atomic_read(&g->usage_count) != target_usage_count)
                         && (wait_length-- >= 0))
                 nvgpu_msleep(20);
 
         if (wait_length < 0) {
                 pr_warn("%s: Timed out waiting for idle (%d)!\n",
-                        __func__, atomic_read(&g->usage_count));
+                        __func__, nvgpu_atomic_read(&g->usage_count));
                 return -ETIMEDOUT;
         }
 
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 4878fdd6..47fd3aef 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -48,6 +48,7 @@ struct nvgpu_cpu_time_correlation_sample;
 #include <nvgpu/kref.h>
 #include <nvgpu/falcon.h>
 #include <nvgpu/pmu.h>
+#include <nvgpu/atomic.h>
 
 #include "clk_gk20a.h"
 #include "ce2_gk20a.h"
@@ -1038,7 +1039,7 @@ struct gk20a {
          */
         unsigned long *enabled_flags;
 
-        atomic_t usage_count;
+        nvgpu_atomic_t usage_count;
 
         struct kref refcount;
 
@@ -1205,7 +1206,7 @@ struct gk20a {
 
         struct gk20a_channel_worker {
                 struct nvgpu_thread poll_task;
-                atomic_t put;
+                nvgpu_atomic_t put;
                 struct nvgpu_cond wq;
                 struct nvgpu_list_node items;
                 struct nvgpu_spinlock items_lock;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 16fe7149..e21be1e5 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -617,7 +617,7 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
         nvgpu_mutex_init(&mm->vidmem.first_clear_mutex);
 
         INIT_WORK(&mm->vidmem.clear_mem_worker, gk20a_vidmem_clear_mem_worker);
-        atomic64_set(&mm->vidmem.bytes_pending, 0);
+        nvgpu_atomic64_set(&mm->vidmem.bytes_pending, 0);
         nvgpu_init_list_node(&mm->vidmem.clear_list_head);
         nvgpu_mutex_init(&mm->vidmem.clear_list_mutex);
 
@@ -1165,7 +1165,7 @@ int gk20a_vidmem_get_space(struct gk20a *g, u64 *space)
 
         nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
         *space = nvgpu_alloc_space(allocator) +
-                atomic64_read(&g->mm.vidmem.bytes_pending);
+                nvgpu_atomic64_read(&g->mm.vidmem.bytes_pending);
         nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex);
         return 0;
 #else
@@ -1483,7 +1483,7 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
                 (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
         nvgpu_free_sgtable(g, &mem->priv.sgt);
 
-        WARN_ON(atomic64_sub_return(mem->size,
+        WARN_ON(nvgpu_atomic64_sub_return(mem->size,
                         &g->mm.vidmem.bytes_pending) < 0);
         mem->size = 0;
         mem->aperture = APERTURE_INVALID;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 7b2c0dfc..af176a73 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -30,6 +30,7 @@
 #include <nvgpu/list.h>
 #include <nvgpu/rbtree.h>
 #include <nvgpu/kref.h>
+#include <nvgpu/atomic.h>
 
 struct nvgpu_pd_cache;
 
@@ -283,7 +284,7 @@ struct mm_gk20a {
                 struct nvgpu_mutex clear_list_mutex;
 
                 struct work_struct clear_mem_worker;
-                atomic64_t bytes_pending;
+                nvgpu_atomic64_t bytes_pending;
         } vidmem;
 };
 