author     Alex Waterman <alexw@nvidia.com>     2017-01-12 22:18:21 -0500
committer  Varun Colbert <vcolbert@nvidia.com>  2017-02-13 21:15:03 -0500
commit     e7a0c0ae8b6791c6b8ee30270ebdbe6e95fbbc71 (patch)
tree       0f62852593ad151cc716e258ece88fb5da30a3ae /drivers/gpu/nvgpu/gk20a
parent     aa36d3786aeed6755b9744fed37aad000b582322 (diff)
gpu: nvgpu: Move from gk20a_ to nvgpu_ in semaphore code
Change the prefix in the semaphore code to 'nvgpu_' since this code is
global to all chips.

Bug 1799159

Change-Id: Ic1f3e13428882019e5d1f547acfe95271cc10da5
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1284628
Reviewed-by: Varun Colbert <vcolbert@nvidia.com>
Tested-by: Varun Colbert <vcolbert@nvidia.com>
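[Editor's note: the rename is purely mechanical; the reference-counted semaphore API keeps its semantics. A minimal before/after sketch of a typical call site, based only on the allocation, increment, release, and put paths visible in the diff below; the snippet itself is illustrative and not part of the patch.]

    /* Before: chip-specific 'gk20a_' prefix. */
    struct gk20a_semaphore *s = gk20a_semaphore_alloc(c); /* c: struct channel_gk20a * */
    gk20a_semaphore_incr(s);    /* bump next_value ahead of a release op */
    gk20a_semaphore_release(s); /* signal waiters */
    gk20a_semaphore_put(s);     /* drop the reference */

    /* After: common 'nvgpu_' prefix, identical semantics. */
    struct nvgpu_semaphore *s = nvgpu_semaphore_alloc(c);
    nvgpu_semaphore_incr(s);
    nvgpu_semaphore_release(s);
    nvgpu_semaphore_put(s);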
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c       |  6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.h       |  2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c  | 58
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h  |  1
-rw-r--r--  drivers/gpu/nvgpu/gk20a/debug_gk20a.c         |  4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fence_gk20a.c         | 26
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fence_gk20a.h         |  6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h               |  2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c            | 14
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h            |  2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/sync_gk20a.c          | 26
-rw-r--r--  drivers/gpu/nvgpu/gk20a/sync_gk20a.h          |  6
12 files changed, 76 insertions(+), 77 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 3fa6bb25..b963ad96 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -515,7 +515,7 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)
         while (tmp_get != put) {
                 job = &ch->joblist.pre_alloc.jobs[tmp_get];
                 if (job->post_fence->semaphore) {
-                        __gk20a_semaphore_release(
+                        __nvgpu_semaphore_release(
                                 job->post_fence->semaphore, true);
                         released_job_semaphore = true;
                 }
@@ -525,7 +525,7 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)
         list_for_each_entry_safe(job, n,
                         &ch->joblist.dynamic.jobs, list) {
                 if (job->post_fence->semaphore) {
-                        __gk20a_semaphore_release(
+                        __nvgpu_semaphore_release(
                                 job->post_fence->semaphore, true);
                         released_job_semaphore = true;
                 }
@@ -1006,7 +1006,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
          * as the semaphore pool might get freed after that point.
          */
         if (ch->hw_sema)
-                gk20a_semaphore_free_hw_sema(ch);
+                nvgpu_semaphore_free_hw_sema(ch);
 
         /*
          * When releasing the channel we unbind the VM - so release the ref.
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index fce3f461..f940a271 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -172,7 +172,7 @@ struct channel_gk20a {
         spinlock_t ref_actions_lock;
 #endif
 
-        struct gk20a_semaphore_int *hw_sema;
+        struct nvgpu_semaphore_int *hw_sema;
 
         int hw_chid;
         bool wdt_enabled;
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index 0eba1c30..18971b09 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -388,7 +388,7 @@ struct gk20a_channel_semaphore {
         struct channel_gk20a *c;
 
         /* A semaphore pool owned by this channel. */
-        struct gk20a_semaphore_pool *pool;
+        struct nvgpu_semaphore_pool *pool;
 
         /* A sync timeline that advances when gpu completes work. */
         struct sync_timeline *timeline;
@@ -399,7 +399,7 @@ struct wait_fence_work {
         struct sync_fence_waiter waiter;
         struct sync_fence *fence;
         struct channel_gk20a *ch;
-        struct gk20a_semaphore *sema;
+        struct nvgpu_semaphore *sema;
         struct gk20a *g;
         struct list_head entry;
 };
@@ -501,14 +501,14 @@ static void gk20a_channel_semaphore_launcher(
                  "wait completed (%d) for fence %p '%s', triggering gpu work",
                  err, fence, fence->name);
         sync_fence_put(fence);
-        gk20a_semaphore_release(w->sema);
-        gk20a_semaphore_put(w->sema);
+        nvgpu_semaphore_release(w->sema);
+        nvgpu_semaphore_put(w->sema);
         kfree(w);
 }
 #endif
 
 static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
-                         struct gk20a_semaphore *s, struct priv_cmd_entry *cmd,
+                         struct nvgpu_semaphore *s, struct priv_cmd_entry *cmd,
                          int cmd_size, bool acquire, bool wfi)
 {
         int ch = c->hw_chid;
@@ -521,15 +521,15 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
          * RO for acquire (since we just need to read the mem) and RW for
          * release since we will need to write back to the semaphore memory.
          */
-        va = acquire ? gk20a_semaphore_gpu_ro_va(s) :
-                       gk20a_semaphore_gpu_rw_va(s);
+        va = acquire ? nvgpu_semaphore_gpu_ro_va(s) :
+                       nvgpu_semaphore_gpu_rw_va(s);
 
         /*
          * If the op is not an acquire (so therefor a release) we should
          * incr the underlying sema next_value.
          */
         if (!acquire)
-                gk20a_semaphore_incr(s);
+                nvgpu_semaphore_incr(s);
 
         /* semaphore_a */
         gk20a_mem_wr32(g, cmd->mem, off++, 0x20010004);
@@ -545,7 +545,7 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
         gk20a_mem_wr32(g, cmd->mem, off++, 0x20010006);
         /* payload */
         gk20a_mem_wr32(g, cmd->mem, off++,
-                       gk20a_semaphore_get_value(s));
+                       nvgpu_semaphore_get_value(s));
         /* semaphore_d */
         gk20a_mem_wr32(g, cmd->mem, off++, 0x20010007);
         /* operation: acq_geq, switch_en */
@@ -555,7 +555,7 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
         gk20a_mem_wr32(g, cmd->mem, off++, 0x20010006);
         /* payload */
         gk20a_mem_wr32(g, cmd->mem, off++,
-                       gk20a_semaphore_get_value(s));
+                       nvgpu_semaphore_get_value(s));
         /* semaphore_d */
         gk20a_mem_wr32(g, cmd->mem, off++, 0x20010007);
         /* operation: release, wfi */
@@ -570,13 +570,13 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
         if (acquire)
                 gpu_sema_verbose_dbg("(A) c=%d ACQ_GE %-4u owner=%-3d"
                                      "va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u",
-                                     ch, gk20a_semaphore_get_value(s),
+                                     ch, nvgpu_semaphore_get_value(s),
                                      s->hw_sema->ch->hw_chid, va, cmd->gva,
                                      cmd->mem->gpu_va, ob);
         else
                 gpu_sema_verbose_dbg("(R) c=%d INCR %u (%u) va=0x%llx "
                                      "cmd_mem=0x%llx b=0x%llx off=%u",
-                                     ch, gk20a_semaphore_get_value(s),
+                                     ch, nvgpu_semaphore_get_value(s),
                                      readl(s->hw_sema->value), va, cmd->gva,
                                      cmd->mem->gpu_va, ob);
 }
@@ -596,7 +596,7 @@ static int gk20a_channel_semaphore_wait_syncpt(
 #ifdef CONFIG_SYNC
 /*
  * Attempt a fast path for waiting on a sync_fence. Basically if the passed
- * sync_fence is backed by a gk20a_semaphore then there's no reason to go
+ * sync_fence is backed by a nvgpu_semaphore then there's no reason to go
  * through the rigmarole of setting up a separate semaphore which waits on an
  * interrupt from the GPU and then triggers a worker thread to execute a SW
  * based semaphore release. Instead just have the GPU wait on the same semaphore
@@ -609,9 +609,9 @@ static int gk20a_channel_semaphore_wait_syncpt(
 static int __semaphore_wait_fd_fast_path(struct channel_gk20a *c,
                                          struct sync_fence *fence,
                                          struct priv_cmd_entry *wait_cmd,
-                                         struct gk20a_semaphore **fp_sema)
+                                         struct nvgpu_semaphore **fp_sema)
 {
-        struct gk20a_semaphore *sema;
+        struct nvgpu_semaphore *sema;
         int err;
 
         if (!gk20a_is_sema_backed_sync_fence(fence))
@@ -632,7 +632,7 @@ static int __semaphore_wait_fd_fast_path(struct channel_gk20a *c,
         if (err)
                 return err;
 
-        gk20a_semaphore_get(sema);
+        nvgpu_semaphore_get(sema);
         BUG_ON(!atomic_read(&sema->value));
         add_sema_cmd(c->g, c, sema, wait_cmd, 8, true, false);
 
@@ -655,7 +655,7 @@ static int gk20a_channel_semaphore_wait_fd(
                 container_of(s, struct gk20a_channel_semaphore, ops);
         struct channel_gk20a *c = sema->c;
 #ifdef CONFIG_SYNC
-        struct gk20a_semaphore *fp_sema;
+        struct nvgpu_semaphore *fp_sema;
         struct sync_fence *sync_fence;
         struct priv_cmd_entry *wait_cmd = entry;
         struct wait_fence_work *w = NULL;
@@ -674,7 +674,7 @@ static int gk20a_channel_semaphore_wait_fd(
                                            &c->semaphore_wq,
                                            NULL, false, false);
                 if (err) {
-                        gk20a_semaphore_put(fp_sema);
+                        nvgpu_semaphore_put(fp_sema);
                         goto clean_up_priv_cmd;
                 }
         } else
@@ -716,7 +716,7 @@ static int gk20a_channel_semaphore_wait_fd(
         w->fence = sync_fence;
         w->g = c->g;
         w->ch = c;
-        w->sema = gk20a_semaphore_alloc(c);
+        w->sema = nvgpu_semaphore_alloc(c);
         if (!w->sema) {
                 gk20a_err(dev_from_gk20a(c->g), "ran out of semaphores");
                 err = -ENOMEM;
@@ -724,8 +724,8 @@ static int gk20a_channel_semaphore_wait_fd(
         }
 
         /* worker takes one reference */
-        gk20a_semaphore_get(w->sema);
-        gk20a_semaphore_incr(w->sema);
+        nvgpu_semaphore_get(w->sema);
+        nvgpu_semaphore_incr(w->sema);
 
         /* GPU unblocked when the semaphore value increments. */
         add_sema_cmd(c->g, c, w->sema, wait_cmd, 8, true, false);
@@ -747,12 +747,12 @@ static int gk20a_channel_semaphore_wait_fd(
          * If the sync_fence has already signaled then the above async_wait
          * will never trigger. This causes the semaphore release op to never
          * happen which, in turn, hangs the GPU. That's bad. So let's just
-         * do the gk20a_semaphore_release() right now.
+         * do the nvgpu_semaphore_release() right now.
          */
         if (ret == 1) {
                 sync_fence_put(sync_fence);
-                gk20a_semaphore_release(w->sema);
-                gk20a_semaphore_put(w->sema);
+                nvgpu_semaphore_release(w->sema);
+                nvgpu_semaphore_put(w->sema);
         }
 
 skip_slow_path:
@@ -763,8 +763,8 @@ clean_up_sema:
          * Release the refs to the semaphore, including
          * the one for the worker since it will never run.
          */
-        gk20a_semaphore_put(w->sema);
-        gk20a_semaphore_put(w->sema);
+        nvgpu_semaphore_put(w->sema);
+        nvgpu_semaphore_put(w->sema);
 clean_up_worker:
         kfree(w);
 clean_up_priv_cmd:
@@ -790,10 +790,10 @@ static int __gk20a_channel_semaphore_incr(
         struct gk20a_channel_semaphore *sp =
                 container_of(s, struct gk20a_channel_semaphore, ops);
         struct channel_gk20a *c = sp->c;
-        struct gk20a_semaphore *semaphore;
+        struct nvgpu_semaphore *semaphore;
         int err = 0;
 
-        semaphore = gk20a_semaphore_alloc(c);
+        semaphore = nvgpu_semaphore_alloc(c);
         if (!semaphore) {
                 gk20a_err(dev_from_gk20a(c->g),
                           "ran out of semaphores");
@@ -824,7 +824,7 @@ static int __gk20a_channel_semaphore_incr(
 clean_up_priv_cmd:
         gk20a_free_priv_cmdbuf(c, incr_cmd);
 clean_up_sema:
-        gk20a_semaphore_put(semaphore);
+        nvgpu_semaphore_put(semaphore);
         return err;
 }
 
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
index d9ad12ad..c3136023 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
@@ -23,7 +23,6 @@
 struct gk20a_channel_sync;
 struct priv_cmd_entry;
 struct channel_gk20a;
-struct gk20a_semaphore;
 struct gk20a_fence;
 struct gk20a;
 
diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
index 83fdc05d..d20229b3 100644
--- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
@@ -109,7 +109,7 @@ static void gk20a_debug_show_channel(struct gk20a *g,
         u32 syncpointa, syncpointb;
         u32 *inst_mem;
         struct channel_gk20a *c = g->fifo.channel + hw_chid;
-        struct gk20a_semaphore_int *hw_sema = NULL;
+        struct nvgpu_semaphore_int *hw_sema = NULL;
 
         if (c->hw_sema)
                 hw_sema = c->hw_sema;
@@ -154,7 +154,7 @@ static void gk20a_debug_show_channel(struct gk20a *g,
                           "next_val: 0x%08x addr: 0x%010llx\n",
                           readl(hw_sema->value),
                           atomic_read(&hw_sema->next_value),
-                          gk20a_hw_sema_addr(hw_sema));
+                          nvgpu_hw_sema_addr(hw_sema));
 
 #ifdef CONFIG_TEGRA_GK20A
         if ((pbdma_syncpointb_op_v(syncpointb) == pbdma_syncpointb_op_wait_v())
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
index 6bd59067..115cd2b5 100644
--- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
@@ -47,7 +47,7 @@ static void gk20a_fence_free(struct kref *ref)
                 sync_fence_put(f->sync_fence);
 #endif
         if (f->semaphore)
-                gk20a_semaphore_put(f->semaphore);
+                nvgpu_semaphore_put(f->semaphore);
 
         if (f->allocator) {
                 if (nvgpu_alloc_initialized(f->allocator))
@@ -193,39 +193,39 @@ void gk20a_init_fence(struct gk20a_fence *f,
 
 /* Fences that are backed by GPU semaphores: */
 
-static int gk20a_semaphore_fence_wait(struct gk20a_fence *f, long timeout)
+static int nvgpu_semaphore_fence_wait(struct gk20a_fence *f, long timeout)
 {
         long remain;
 
-        if (!gk20a_semaphore_is_acquired(f->semaphore))
+        if (!nvgpu_semaphore_is_acquired(f->semaphore))
                 return 0;
 
         remain = wait_event_interruptible_timeout(
                 *f->semaphore_wq,
-                !gk20a_semaphore_is_acquired(f->semaphore),
+                !nvgpu_semaphore_is_acquired(f->semaphore),
                 timeout);
-        if (remain == 0 && gk20a_semaphore_is_acquired(f->semaphore))
+        if (remain == 0 && nvgpu_semaphore_is_acquired(f->semaphore))
                 return -ETIMEDOUT;
         else if (remain < 0)
                 return remain;
         return 0;
 }
 
-static bool gk20a_semaphore_fence_is_expired(struct gk20a_fence *f)
+static bool nvgpu_semaphore_fence_is_expired(struct gk20a_fence *f)
 {
-        return !gk20a_semaphore_is_acquired(f->semaphore);
+        return !nvgpu_semaphore_is_acquired(f->semaphore);
 }
 
-static const struct gk20a_fence_ops gk20a_semaphore_fence_ops = {
-        .wait = &gk20a_semaphore_fence_wait,
-        .is_expired = &gk20a_semaphore_fence_is_expired,
+static const struct gk20a_fence_ops nvgpu_semaphore_fence_ops = {
+        .wait = &nvgpu_semaphore_fence_wait,
+        .is_expired = &nvgpu_semaphore_fence_is_expired,
 };
 
 /* This function takes ownership of the semaphore */
 int gk20a_fence_from_semaphore(
                 struct gk20a_fence *fence_out,
                 struct sync_timeline *timeline,
-                struct gk20a_semaphore *semaphore,
+                struct nvgpu_semaphore *semaphore,
                 wait_queue_head_t *semaphore_wq,
                 struct sync_fence *dependency,
                 bool wfi, bool need_sync_fence)
@@ -237,13 +237,13 @@ int gk20a_fence_from_semaphore(
         if (need_sync_fence) {
                 sync_fence = gk20a_sync_fence_create(timeline, semaphore,
                                 dependency, "f-gk20a-0x%04x",
-                                gk20a_semaphore_gpu_ro_va(semaphore));
+                                nvgpu_semaphore_gpu_ro_va(semaphore));
                 if (!sync_fence)
                         return -1;
         }
 #endif
 
-        gk20a_init_fence(f, &gk20a_semaphore_fence_ops, sync_fence, wfi);
+        gk20a_init_fence(f, &nvgpu_semaphore_fence_ops, sync_fence, wfi);
         if (!f) {
 #ifdef CONFIG_SYNC
                 if (sync_fence)
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.h b/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
index 5abf96e5..b4283f58 100644
--- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
@@ -24,7 +24,7 @@
 struct platform_device;
 struct sync_timeline;
 struct sync_fence;
-struct gk20a_semaphore;
+struct nvgpu_semaphore;
 struct channel_gk20a;
 
 struct gk20a_fence_ops;
@@ -38,7 +38,7 @@ struct gk20a_fence {
         const struct gk20a_fence_ops *ops;
 
         /* Valid for fences created from semaphores: */
-        struct gk20a_semaphore *semaphore;
+        struct nvgpu_semaphore *semaphore;
         wait_queue_head_t *semaphore_wq;
 
         /* Valid for fences created from syncpoints: */
@@ -54,7 +54,7 @@ struct gk20a_fence {
 int gk20a_fence_from_semaphore(
                 struct gk20a_fence *fence_out,
                 struct sync_timeline *timeline,
-                struct gk20a_semaphore *semaphore,
+                struct nvgpu_semaphore *semaphore,
                 wait_queue_head_t *semaphore_wq,
                 struct sync_fence *dependency,
                 bool wfi, bool need_sync_fence);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 39be4e9c..8d6d36fc 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -922,7 +922,7 @@ struct gk20a {
         /*
          * A group of semaphore pools. One for each channel.
          */
-        struct gk20a_semaphore_sea *sema_sea;
+        struct nvgpu_semaphore_sea *sema_sea;
 
         /* List of pending SW semaphore waits. */
         struct list_head pending_sema_waits;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index cafb1233..c62d1f6c 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -4096,8 +4096,8 @@ static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm)
          */
         if (!gk20a_platform_has_syncpoints(gk20a_from_vm(vm)->dev)) {
                 if (vm->sema_pool) {
-                        gk20a_semaphore_pool_unmap(vm->sema_pool, vm);
-                        gk20a_semaphore_pool_put(vm->sema_pool);
+                        nvgpu_semaphore_pool_unmap(vm->sema_pool, vm);
+                        nvgpu_semaphore_pool_put(vm->sema_pool);
                 }
         }
 
@@ -4180,7 +4180,7 @@ const struct gk20a_mmu_level gk20a_mm_levels_128k[] = {
  */
 static int gk20a_init_sema_pool(struct vm_gk20a *vm)
 {
-        struct gk20a_semaphore_sea *sema_sea;
+        struct nvgpu_semaphore_sea *sema_sea;
         struct mm_gk20a *mm = vm->mm;
         struct gk20a *g = mm->g;
         int err;
@@ -4194,11 +4194,11 @@ static int gk20a_init_sema_pool(struct vm_gk20a *vm)
         if (vm->sema_pool)
                 return 0;
 
-        sema_sea = gk20a_semaphore_sea_create(g);
+        sema_sea = nvgpu_semaphore_sea_create(g);
         if (!sema_sea)
                 return -ENOMEM;
 
-        vm->sema_pool = gk20a_semaphore_pool_alloc(sema_sea);
+        vm->sema_pool = nvgpu_semaphore_pool_alloc(sema_sea);
         if (!vm->sema_pool)
                 return -ENOMEM;
 
@@ -4220,9 +4220,9 @@ static int gk20a_init_sema_pool(struct vm_gk20a *vm)
                 return -ENOMEM;
         }
 
-        err = gk20a_semaphore_pool_map(vm->sema_pool, vm);
+        err = nvgpu_semaphore_pool_map(vm->sema_pool, vm);
         if (err) {
-                gk20a_semaphore_pool_unmap(vm->sema_pool, vm);
+                nvgpu_semaphore_pool_unmap(vm->sema_pool, vm);
                 nvgpu_free(vm->vma[gmmu_page_size_small],
                            vm->sema_pool->gpu_va);
                 return err;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index d39ca2d0..294dc628 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -303,7 +303,7 @@ struct vm_gk20a {
         /*
          * Each address space needs to have a semaphore pool.
          */
-        struct gk20a_semaphore_pool *sema_pool;
+        struct nvgpu_semaphore_pool *sema_pool;
 };
 
 struct gk20a;
diff --git a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
index e7bacac8..edfe3deb 100644
--- a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
@@ -45,7 +45,7 @@ struct gk20a_sync_timeline {
 struct gk20a_sync_pt {
         struct kref refcount;
         u32 thresh;
-        struct gk20a_semaphore *sema;
+        struct nvgpu_semaphore *sema;
         struct gk20a_sync_timeline *obj;
         struct sync_fence *dep;
         ktime_t dep_timestamp;
@@ -110,7 +110,7 @@ int gk20a_is_sema_backed_sync_fence(struct sync_fence *fence)
         return 0;
 }
 
-struct gk20a_semaphore *gk20a_sync_fence_get_sema(struct sync_fence *f)
+struct nvgpu_semaphore *gk20a_sync_fence_get_sema(struct sync_fence *f)
 {
         struct sync_pt *spt;
         struct gk20a_sync_pt_inst *pti;
@@ -208,13 +208,13 @@ static void gk20a_sync_pt_free_shared(struct kref *ref)
         if (pt->dep)
                 sync_fence_put(pt->dep);
         if (pt->sema)
-                gk20a_semaphore_put(pt->sema);
+                nvgpu_semaphore_put(pt->sema);
         kfree(pt);
 }
 
 static struct gk20a_sync_pt *gk20a_sync_pt_create_shared(
                 struct gk20a_sync_timeline *obj,
-                struct gk20a_semaphore *sema,
+                struct nvgpu_semaphore *sema,
                 struct sync_fence *dependency)
 {
         struct gk20a_sync_pt *shared;
@@ -244,14 +244,14 @@ static struct gk20a_sync_pt *gk20a_sync_pt_create_shared(
 
         spin_lock_init(&shared->lock);
 
-        gk20a_semaphore_get(sema);
+        nvgpu_semaphore_get(sema);
 
         return shared;
 }
 
 static struct sync_pt *gk20a_sync_pt_create_inst(
                 struct gk20a_sync_timeline *obj,
-                struct gk20a_semaphore *sema,
+                struct nvgpu_semaphore *sema,
                 struct sync_fence *dependency)
 {
         struct gk20a_sync_pt_inst *pti;
@@ -309,7 +309,7 @@ static int gk20a_sync_pt_has_signaled(struct sync_pt *sync_pt)
                 goto done;
 
         /* Acquired == not realeased yet == active == not signaled. */
-        signaled = !gk20a_semaphore_is_acquired(pt->sema);
+        signaled = !nvgpu_semaphore_is_acquired(pt->sema);
 
         if (signaled) {
                 /* Update min if necessary. */
@@ -341,7 +341,7 @@ static int gk20a_sync_pt_has_signaled(struct sync_pt *sync_pt)
                 }
 
                 /* Release the semaphore to the pool. */
-                gk20a_semaphore_put(pt->sema);
+                nvgpu_semaphore_put(pt->sema);
                 pt->sema = NULL;
         }
 done:
@@ -410,12 +410,12 @@ static void gk20a_sync_timeline_value_str(struct sync_timeline *timeline,
 static void gk20a_sync_pt_value_str_for_sema(struct gk20a_sync_pt *pt,
                                              char *str, int size)
 {
-        struct gk20a_semaphore *s = pt->sema;
+        struct nvgpu_semaphore *s = pt->sema;
 
         snprintf(str, size, "S: c=%d [v=%u,r_v=%u]",
                  s->hw_sema->ch->hw_chid,
-                 gk20a_semaphore_get_value(s),
-                 gk20a_semaphore_read(s));
+                 nvgpu_semaphore_get_value(s),
+                 nvgpu_semaphore_read(s));
 }
 
 static void gk20a_sync_pt_value_str(struct sync_pt *sync_pt, char *str,
@@ -458,7 +458,7 @@ static int gk20a_sync_fill_driver_data(struct sync_pt *sync_pt,
 }
 
 static const struct sync_timeline_ops gk20a_sync_timeline_ops = {
-        .driver_name = "gk20a_semaphore",
+        .driver_name = "nvgpu_semaphore",
         .dup = gk20a_sync_pt_dup_inst,
         .has_signaled = gk20a_sync_pt_has_signaled,
         .compare = gk20a_sync_pt_compare,
@@ -508,7 +508,7 @@ struct sync_timeline *gk20a_sync_timeline_create(
 }
 
 struct sync_fence *gk20a_sync_fence_create(struct sync_timeline *obj,
-                struct gk20a_semaphore *sema,
+                struct nvgpu_semaphore *sema,
                 struct sync_fence *dependency,
                 const char *fmt, ...)
 {
diff --git a/drivers/gpu/nvgpu/gk20a/sync_gk20a.h b/drivers/gpu/nvgpu/gk20a/sync_gk20a.h
index 88bf07cd..dcced5c8 100644
--- a/drivers/gpu/nvgpu/gk20a/sync_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/sync_gk20a.h
@@ -24,18 +24,18 @@
 struct sync_timeline;
 struct sync_fence;
 struct sync_pt;
-struct gk20a_semaphore;
+struct nvgpu_semaphore;
 struct fence;
 
 int gk20a_is_sema_backed_sync_fence(struct sync_fence *fence);
-struct gk20a_semaphore *gk20a_sync_fence_get_sema(struct sync_fence *f);
+struct nvgpu_semaphore *gk20a_sync_fence_get_sema(struct sync_fence *f);
 
 #ifdef CONFIG_SYNC
 struct sync_timeline *gk20a_sync_timeline_create(const char *fmt, ...);
 void gk20a_sync_timeline_destroy(struct sync_timeline *);
 void gk20a_sync_timeline_signal(struct sync_timeline *);
 struct sync_fence *gk20a_sync_fence_create(struct sync_timeline *,
-                struct gk20a_semaphore *,
+                struct nvgpu_semaphore *,
                 struct sync_fence *dependency,
                 const char *fmt, ...);
 struct sync_fence *gk20a_sync_fence_fdget(int fd);