author     Debarshi Dutta <ddutta@nvidia.com>                   2018-09-04 08:09:36 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-09-18 02:39:24 -0400
commit     2517d59be282426eec7a97745b76d745ff36c388 (patch)
tree       faf915b5cfffb781918d674ec7d769feb7e98ac8 /drivers/gpu/nvgpu
parent     8381eeea4f9b4717854387068ddf9244973e7d0d (diff)
gpu: nvgpu: move channel_sync_gk20a.* to common directory
1) Move channel_sync_gk20a.* from the gk20a/ to the common/ directory, as
   these files do not program any hardware registers. As part of the move,
   rename channel_sync_gk20a.* to channel_sync.* and update the headers in
   the files that include them.
2) Rename struct gk20a_channel_sync to struct nvgpu_channel_sync. The
   corresponding syncpt and semaphore versions of the struct, along with
   their related methods, are renamed by replacing "gk20a" with "nvgpu".
3) Add MISRA-C cleanups.

Jira NVGPU-1086

Change-Id: I4e0e21803ca3858dd7a5fc4d2454dba1f1bfcecd
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1812594
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
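For readers tracking the rename, here is a minimal sketch of how a caller drives the renamed interface. The create/destroy functions and their signatures come from the diff below; the wrapper functions and error handling are hypothetical:

    #include <nvgpu/channel_sync.h>

    /* Sketch only: bind a kernel-managed sync object to a channel. */
    static int example_bind_sync(struct channel_gk20a *c)
    {
        /* false = kernel-managed, not a user-managed syncpoint */
        c->sync = nvgpu_channel_sync_create(c, false);
        if (c->sync == NULL) {
            return -ENOMEM;
        }
        return 0;
    }

    /* Sketch only: tear it down; false = skip the safe-state reset. */
    static void example_unbind_sync(struct channel_gk20a *c)
    {
        if (c->sync != NULL) {
            nvgpu_channel_sync_destroy(c->sync, false);
            c->sync = NULL;
        }
    }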
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--  drivers/gpu/nvgpu/Makefile                             |   4
-rw-r--r--  drivers/gpu/nvgpu/Makefile.sources                     |   2
-rw-r--r--  drivers/gpu/nvgpu/common/fifo/channel.c                |  14
-rw-r--r--  drivers/gpu/nvgpu/common/fifo/submit.c                 |   6
-rw-r--r--  drivers/gpu/nvgpu/common/sync/channel_sync.c           | 266
            (renamed from drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c)
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.c                        |   4
-rw-r--r--  drivers/gpu/nvgpu/gp10b/fifo_gp10b.c                   |   2
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/channel.h              |   6
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/channel_sync.h         |  43
            (renamed from drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h)
-rw-r--r--  drivers/gpu/nvgpu/os/linux/ioctl_channel.c             |   4
-rw-r--r--  drivers/gpu/nvgpu/os/linux/os_fence_android_sema.c     |   4
-rw-r--r--  drivers/gpu/nvgpu/os/linux/os_fence_android_syncpt.c   |   4
12 files changed, 184 insertions(+), 175 deletions(-)
diff --git a/drivers/gpu/nvgpu/Makefile b/drivers/gpu/nvgpu/Makefile
index d0dd252e..d59c3f74 100644
--- a/drivers/gpu/nvgpu/Makefile
+++ b/drivers/gpu/nvgpu/Makefile
@@ -50,7 +50,8 @@ nvgpu-y += common/bus/bus_gk20a.o \
 	common/mc/mc_gm20b.o \
 	common/mc/mc_gp10b.o \
 	common/mc/mc_gv11b.o \
-	common/mc/mc_gv100.o
+	common/mc/mc_gv100.o \
+	common/sync/channel_sync.o
 
 # Linux specific parts of nvgpu.
 nvgpu-y += \
@@ -228,7 +229,6 @@ nvgpu-y += \
 	gk20a/gk20a.o \
 	gk20a/ce2_gk20a.o \
 	gk20a/fifo_gk20a.o \
-	gk20a/channel_sync_gk20a.o \
 	gk20a/dbg_gpu_gk20a.o \
 	gk20a/regops_gk20a.o \
 	gk20a/gr_gk20a.o \
diff --git a/drivers/gpu/nvgpu/Makefile.sources b/drivers/gpu/nvgpu/Makefile.sources
index a53548f4..fce8ea71 100644
--- a/drivers/gpu/nvgpu/Makefile.sources
+++ b/drivers/gpu/nvgpu/Makefile.sources
@@ -100,6 +100,7 @@ srcs := os/posix/nvgpu.c \
 	common/pmu/pmu_perfmon.c \
 	common/pmu/pmu_debug.c \
 	common/ptimer/ptimer.c \
+	common/sync/channel_sync.c \
 	common/clock_gating/gm20b_gating_reglist.c \
 	common/clock_gating/gp10b_gating_reglist.c \
 	common/clock_gating/gv11b_gating_reglist.c \
@@ -147,7 +148,6 @@ srcs := os/posix/nvgpu.c \
 	common/ptimer/ptimer_gk20a.c \
 	gk20a/ce2_gk20a.c \
 	gk20a/fifo_gk20a.c \
-	gk20a/channel_sync_gk20a.c \
 	gk20a/dbg_gpu_gk20a.c \
 	gk20a/regops_gk20a.c \
 	gk20a/gr_gk20a.c \
diff --git a/drivers/gpu/nvgpu/common/fifo/channel.c b/drivers/gpu/nvgpu/common/fifo/channel.c
index 45f5b736..1613f5f6 100644
--- a/drivers/gpu/nvgpu/common/fifo/channel.c
+++ b/drivers/gpu/nvgpu/common/fifo/channel.c
@@ -44,11 +44,11 @@
 #include <nvgpu/log2.h>
 #include <nvgpu/ptimer.h>
 #include <nvgpu/channel.h>
+#include <nvgpu/channel_sync.h>
 
 #include "gk20a/gk20a.h"
 #include "gk20a/dbg_gpu_gk20a.h"
 #include "gk20a/fence_gk20a.h"
-#include "gk20a/channel_sync_gk20a.h"
 
 static void free_channel(struct fifo_gk20a *f, struct channel_gk20a *c);
 static void gk20a_channel_dump_ref_actions(struct channel_gk20a *c);
@@ -416,7 +416,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	/* sync must be destroyed before releasing channel vm */
 	nvgpu_mutex_acquire(&ch->sync_lock);
 	if (ch->sync) {
-		gk20a_channel_sync_destroy(ch->sync, false);
+		nvgpu_channel_sync_destroy(ch->sync, false);
 		ch->sync = NULL;
 	}
 	if (ch->user_sync) {
@@ -425,9 +425,9 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 		 * But it's already done if channel has timedout
 		 */
 		if (ch->has_timedout) {
-			gk20a_channel_sync_destroy(ch->user_sync, false);
+			nvgpu_channel_sync_destroy(ch->user_sync, false);
 		} else {
-			gk20a_channel_sync_destroy(ch->user_sync, true);
+			nvgpu_channel_sync_destroy(ch->user_sync, true);
 		}
 		ch->user_sync = NULL;
 	}
@@ -1191,7 +1191,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 
 	if (g->aggressive_sync_destroy_thresh == 0U) {
 		nvgpu_mutex_acquire(&c->sync_lock);
-		c->sync = gk20a_channel_sync_create(c, false);
+		c->sync = nvgpu_channel_sync_create(c, false);
 		if (c->sync == NULL) {
 			err = -ENOMEM;
 			nvgpu_mutex_release(&c->sync_lock);
@@ -1253,7 +1253,7 @@ clean_up_prealloc:
 	}
 clean_up_sync:
 	if (c->sync) {
-		gk20a_channel_sync_destroy(c->sync, false);
+		nvgpu_channel_sync_destroy(c->sync, false);
 		c->sync = NULL;
 	}
 clean_up_unmap:
@@ -1984,7 +1984,7 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 			if (nvgpu_atomic_dec_and_test(
 				&c->sync->refcount) &&
 				g->aggressive_sync_destroy) {
-				gk20a_channel_sync_destroy(c->sync,
+				nvgpu_channel_sync_destroy(c->sync,
 					false);
 				c->sync = NULL;
 			}
diff --git a/drivers/gpu/nvgpu/common/fifo/submit.c b/drivers/gpu/nvgpu/common/fifo/submit.c
index 1f7a04a2..5a0beea9 100644
--- a/drivers/gpu/nvgpu/common/fifo/submit.c
+++ b/drivers/gpu/nvgpu/common/fifo/submit.c
@@ -24,12 +24,12 @@
 #include <nvgpu/ltc.h>
 #include <nvgpu/os_sched.h>
 #include <nvgpu/utils.h>
+#include <nvgpu/channel_sync.h>
 
 #include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
 
 #include "gk20a/gk20a.h"
 #include "gk20a/fence_gk20a.h"
-#include "gk20a/channel_sync_gk20a.h"
 
 #include <trace/events/gk20a.h>
 
@@ -56,7 +56,7 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 	if (g->aggressive_sync_destroy_thresh) {
 		nvgpu_mutex_acquire(&c->sync_lock);
 		if (!c->sync) {
-			c->sync = gk20a_channel_sync_create(c, false);
+			c->sync = nvgpu_channel_sync_create(c, false);
 			if (!c->sync) {
 				err = -ENOMEM;
 				nvgpu_mutex_release(&c->sync_lock);
@@ -409,7 +409,7 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 	}
 
 	need_sync_framework =
-		gk20a_channel_sync_needs_sync_framework(g) ||
+		nvgpu_channel_sync_needs_os_fence_framework(g) ||
 		(flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE &&
 		 flags & NVGPU_SUBMIT_FLAGS_FENCE_GET);
 
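Both call sites above follow the same pattern: when g->aggressive_sync_destroy_thresh is non-zero, the sync object is created lazily under c->sync_lock at submit time, and the cleanup path in channel.c drops it once its refcount reaches zero. A condensed sketch of the submit side (abbreviated from the hunk above; the refcount increment is an assumption inferred from the matching nvgpu_atomic_dec_and_test() in the cleanup hunk):

    /* Sketch: lazy creation of c->sync under sync_lock at submit time. */
    if (g->aggressive_sync_destroy_thresh) {
        nvgpu_mutex_acquire(&c->sync_lock);
        if (!c->sync) {
            c->sync = nvgpu_channel_sync_create(c, false);
            if (!c->sync) {
                nvgpu_mutex_release(&c->sync_lock);
                return -ENOMEM;
            }
        }
        /* Assumed: paired with the dec_and_test in the cleanup path. */
        nvgpu_atomic_inc(&c->sync->refcount);
        nvgpu_mutex_release(&c->sync_lock);
    }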
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/common/sync/channel_sync.c
index d7399403..b4caab38 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/common/sync/channel_sync.c
@@ -31,23 +31,23 @@
 #include <nvgpu/nvhost.h>
 #include <nvgpu/os_fence.h>
 #include <nvgpu/channel.h>
+#include <nvgpu/channel_sync.h>
 
-#include "channel_sync_gk20a.h"
-#include "gk20a.h"
-#include "fence_gk20a.h"
-#include "mm_gk20a.h"
+#include "gk20a/gk20a.h"
+#include "gk20a/fence_gk20a.h"
+#include "gk20a/mm_gk20a.h"
 
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
 
-struct gk20a_channel_syncpt {
-	struct gk20a_channel_sync ops;
+struct nvgpu_channel_sync_syncpt {
+	struct nvgpu_channel_sync ops;
 	struct channel_gk20a *c;
 	struct nvgpu_nvhost_dev *nvhost_dev;
 	u32 id;
 	struct nvgpu_mem syncpt_buf;
 };
 
-int gk20a_channel_gen_syncpt_wait_cmd(struct channel_gk20a *c,
+int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c,
 	u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd,
 	u32 wait_cmd_size, int pos, bool preallocated)
 {
@@ -58,14 +58,14 @@ int gk20a_channel_gen_syncpt_wait_cmd(struct channel_gk20a *c,
 	if (is_expired) {
 		if (preallocated) {
 			nvgpu_memset(c->g, wait_cmd->mem,
-			(wait_cmd->off + pos * wait_cmd_size) * sizeof(u32),
-				0, wait_cmd_size * sizeof(u32));
+			(wait_cmd->off + (u32)pos * wait_cmd_size) * (u32)sizeof(u32),
+				0, wait_cmd_size * (u32)sizeof(u32));
 		}
 	} else {
 		if (!preallocated) {
 			err = gk20a_channel_alloc_priv_cmdbuf(c,
 				c->g->ops.fifo.get_syncpt_wait_cmd_size(), wait_cmd);
-			if (err) {
+			if (err != 0) {
 				nvgpu_err(c->g, "not enough priv cmd buffer space");
 				return err;
 			}
@@ -73,43 +73,45 @@ int gk20a_channel_gen_syncpt_wait_cmd(struct channel_gk20a *c,
 		nvgpu_log(c->g, gpu_dbg_info, "sp->id %d gpu va %llx",
 			id, c->vm->syncpt_ro_map_gpu_va);
 		c->g->ops.fifo.add_syncpt_wait_cmd(c->g, wait_cmd,
-			pos * wait_cmd_size, id, thresh,
+			(u32)pos * wait_cmd_size, id, thresh,
 			c->vm->syncpt_ro_map_gpu_va);
 	}
 
 	return 0;
 }
 
-static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
+static int channel_sync_syncpt_wait_raw(struct nvgpu_channel_sync *s,
 	u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd)
 {
-	struct gk20a_channel_syncpt *sp =
-		container_of(s, struct gk20a_channel_syncpt, ops);
+	struct nvgpu_channel_sync_syncpt *sp =
+		container_of(s, struct nvgpu_channel_sync_syncpt, ops);
 	struct channel_gk20a *c = sp->c;
 	int err = 0;
 	u32 wait_cmd_size = c->g->ops.fifo.get_syncpt_wait_cmd_size();
 
-	if (!nvgpu_nvhost_syncpt_is_valid_pt_ext(sp->nvhost_dev, id))
+	if (!nvgpu_nvhost_syncpt_is_valid_pt_ext(sp->nvhost_dev, id)) {
 		return -EINVAL;
+	}
 
-	err = gk20a_channel_gen_syncpt_wait_cmd(c, id, thresh,
+	err = channel_sync_syncpt_gen_wait_cmd(c, id, thresh,
 		wait_cmd, wait_cmd_size, 0, false);
 
 	return err;
 }
 
-static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
+static int channel_sync_syncpt_wait_fd(struct nvgpu_channel_sync *s, int fd,
 	struct priv_cmd_entry *wait_cmd, int max_wait_cmds)
 {
 	struct nvgpu_os_fence os_fence = {0};
-	struct gk20a_channel_syncpt *sp =
-		container_of(s, struct gk20a_channel_syncpt, ops);
+	struct nvgpu_channel_sync_syncpt *sp =
+		container_of(s, struct nvgpu_channel_sync_syncpt, ops);
 	struct channel_gk20a *c = sp->c;
 	int err = 0;
 
 	err = nvgpu_os_fence_fdget(&os_fence, c, fd);
-	if (err)
+	if (err != 0) {
 		return -EINVAL;
+	}
 
 	err = os_fence.ops->program_waits(&os_fence,
 		wait_cmd, c, max_wait_cmds);
@@ -119,17 +121,17 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
 	return err;
 }
 
-static void gk20a_channel_syncpt_update(void *priv, int nr_completed)
+static void channel_sync_syncpt_update(void *priv, int nr_completed)
 {
 	struct channel_gk20a *ch = priv;
 
 	gk20a_channel_update(ch);
 
-	/* note: channel_get() is in __gk20a_channel_syncpt_incr() */
+	/* note: channel_get() is in channel_sync_syncpt_incr_common() */
 	gk20a_channel_put(ch);
 }
 
-static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
+static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
 	bool wfi_cmd,
 	bool register_irq,
 	struct priv_cmd_entry *incr_cmd,
@@ -138,16 +140,17 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
 {
 	u32 thresh;
 	int err;
-	struct gk20a_channel_syncpt *sp =
-		container_of(s, struct gk20a_channel_syncpt, ops);
+	struct nvgpu_channel_sync_syncpt *sp =
+		container_of(s, struct nvgpu_channel_sync_syncpt, ops);
 	struct channel_gk20a *c = sp->c;
 	struct nvgpu_os_fence os_fence = {0};
 
 	err = gk20a_channel_alloc_priv_cmdbuf(c,
 		c->g->ops.fifo.get_syncpt_incr_cmd_size(wfi_cmd),
 		incr_cmd);
-	if (err)
+	if (err != 0) {
 		return err;
+	}
 
 	nvgpu_log(c->g, gpu_dbg_info, "sp->id %d gpu va %llx",
 		sp->id, sp->syncpt_buf.gpu_va);
@@ -164,14 +167,15 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
 
 	if (referenced) {
 		/* note: channel_put() is in
-		 * gk20a_channel_syncpt_update() */
+		 * channel_sync_syncpt_update() */
 
 		err = nvgpu_nvhost_intr_register_notifier(
 			sp->nvhost_dev,
 			sp->id, thresh,
-			gk20a_channel_syncpt_update, c);
-		if (err)
+			channel_sync_syncpt_update, c);
+		if (err != 0) {
 			gk20a_channel_put(referenced);
+		}
 
 		/* Adding interrupt action should
 		 * never fail. A proper error handling
@@ -187,16 +191,18 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
 		err = nvgpu_os_fence_syncpt_create(&os_fence, c, sp->nvhost_dev,
 			sp->id, thresh);
 
-		if (err)
+		if (err != 0) {
 			goto clean_up_priv_cmd;
+		}
 	}
 
 	err = gk20a_fence_from_syncpt(fence, sp->nvhost_dev,
 		sp->id, thresh, os_fence);
 
-	if (err) {
-		if (nvgpu_os_fence_is_initialized(&os_fence))
+	if (err != 0) {
+		if (nvgpu_os_fence_is_initialized(&os_fence) != 0) {
 			os_fence.ops->drop_ref(&os_fence);
+		}
 		goto clean_up_priv_cmd;
 	}
 
@@ -207,7 +213,7 @@ clean_up_priv_cmd:
 	return err;
 }
 
-static int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
+static int channel_sync_syncpt_incr(struct nvgpu_channel_sync *s,
 	struct priv_cmd_entry *entry,
 	struct gk20a_fence *fence,
 	bool need_sync_fence,
@@ -215,13 +221,13 @@ static int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
 {
 	/* Don't put wfi cmd to this one since we're not returning
 	 * a fence to user space. */
-	return __gk20a_channel_syncpt_incr(s,
+	return channel_sync_syncpt_incr_common(s,
 		false /* no wfi */,
 		register_irq /* register irq */,
 		entry, fence, need_sync_fence);
 }
 
-static int gk20a_channel_syncpt_incr_user(struct gk20a_channel_sync *s,
+static int channel_sync_syncpt_incr_user(struct nvgpu_channel_sync *s,
 	int wait_fence_fd,
 	struct priv_cmd_entry *entry,
 	struct gk20a_fence *fence,
@@ -231,44 +237,44 @@ static int gk20a_channel_syncpt_incr_user(struct gk20a_channel_sync *s,
 {
 	/* Need to do 'wfi + host incr' since we return the fence
 	 * to user space. */
-	return __gk20a_channel_syncpt_incr(s,
+	return channel_sync_syncpt_incr_common(s,
 		wfi,
 		register_irq /* register irq */,
 		entry, fence, need_sync_fence);
 }
 
-static void gk20a_channel_syncpt_set_min_eq_max(struct gk20a_channel_sync *s)
+static void channel_sync_syncpt_set_min_eq_max(struct nvgpu_channel_sync *s)
 {
-	struct gk20a_channel_syncpt *sp =
-		container_of(s, struct gk20a_channel_syncpt, ops);
+	struct nvgpu_channel_sync_syncpt *sp =
+		container_of(s, struct nvgpu_channel_sync_syncpt, ops);
 	nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost_dev, sp->id);
 }
 
-static void gk20a_channel_syncpt_set_safe_state(struct gk20a_channel_sync *s)
+static void channel_sync_syncpt_set_safe_state(struct nvgpu_channel_sync *s)
 {
-	struct gk20a_channel_syncpt *sp =
-		container_of(s, struct gk20a_channel_syncpt, ops);
+	struct nvgpu_channel_sync_syncpt *sp =
+		container_of(s, struct nvgpu_channel_sync_syncpt, ops);
 	nvgpu_nvhost_syncpt_set_safe_state(sp->nvhost_dev, sp->id);
 }
 
-static int gk20a_channel_syncpt_id(struct gk20a_channel_sync *s)
+static int syncpt_get_id(struct nvgpu_channel_sync *s)
 {
-	struct gk20a_channel_syncpt *sp =
-		container_of(s, struct gk20a_channel_syncpt, ops);
+	struct nvgpu_channel_sync_syncpt *sp =
+		container_of(s, struct nvgpu_channel_sync_syncpt, ops);
 	return sp->id;
 }
 
-static u64 gk20a_channel_syncpt_address(struct gk20a_channel_sync *s)
+static u64 channel_sync_syncpt_get_address(struct nvgpu_channel_sync *s)
 {
-	struct gk20a_channel_syncpt *sp =
-		container_of(s, struct gk20a_channel_syncpt, ops);
+	struct nvgpu_channel_sync_syncpt *sp =
+		container_of(s, struct nvgpu_channel_sync_syncpt, ops);
 	return sp->syncpt_buf.gpu_va;
 }
 
-static void gk20a_channel_syncpt_destroy(struct gk20a_channel_sync *s)
+static void channel_sync_syncpt_destroy(struct nvgpu_channel_sync *s)
 {
-	struct gk20a_channel_syncpt *sp =
-		container_of(s, struct gk20a_channel_syncpt, ops);
+	struct nvgpu_channel_sync_syncpt *sp =
+		container_of(s, struct nvgpu_channel_sync_syncpt, ops);
 
 
 	sp->c->g->ops.fifo.free_syncpt_buf(sp->c, &sp->syncpt_buf);
@@ -278,15 +284,16 @@ static void gk20a_channel_syncpt_destroy(struct gk20a_channel_sync *s)
 	nvgpu_kfree(sp->c->g, sp);
 }
 
-static struct gk20a_channel_sync *
-gk20a_channel_syncpt_create(struct channel_gk20a *c, bool user_managed)
+static struct nvgpu_channel_sync *
+channel_sync_syncpt_create(struct channel_gk20a *c, bool user_managed)
 {
-	struct gk20a_channel_syncpt *sp;
+	struct nvgpu_channel_sync_syncpt *sp;
 	char syncpt_name[32];
 
 	sp = nvgpu_kzalloc(c->g, sizeof(*sp));
-	if (!sp)
+	if (sp == NULL) {
 		return NULL;
+	}
 
 	sp->c = c;
 	sp->nvhost_dev = c->g->nvhost_dev;
@@ -304,7 +311,7 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c, bool user_managed)
 		sp->id = nvgpu_nvhost_get_syncpt_host_managed(sp->nvhost_dev,
 			c->chid, syncpt_name);
 	}
-	if (!sp->id) {
+	if (sp->id == 0) {
 		nvgpu_kfree(c->g, sp);
 		nvgpu_err(c->g, "failed to get free syncpt");
 		return NULL;
@@ -316,22 +323,22 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c, bool user_managed)
 	nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost_dev, sp->id);
 
 	nvgpu_atomic_set(&sp->ops.refcount, 0);
-	sp->ops.wait_syncpt = gk20a_channel_syncpt_wait_syncpt;
-	sp->ops.wait_fd = gk20a_channel_syncpt_wait_fd;
-	sp->ops.incr = gk20a_channel_syncpt_incr;
-	sp->ops.incr_user = gk20a_channel_syncpt_incr_user;
-	sp->ops.set_min_eq_max = gk20a_channel_syncpt_set_min_eq_max;
-	sp->ops.set_safe_state = gk20a_channel_syncpt_set_safe_state;
-	sp->ops.syncpt_id = gk20a_channel_syncpt_id;
-	sp->ops.syncpt_address = gk20a_channel_syncpt_address;
-	sp->ops.destroy = gk20a_channel_syncpt_destroy;
+	sp->ops.wait_syncpt = channel_sync_syncpt_wait_raw;
+	sp->ops.wait_fd = channel_sync_syncpt_wait_fd;
+	sp->ops.incr = channel_sync_syncpt_incr;
+	sp->ops.incr_user = channel_sync_syncpt_incr_user;
+	sp->ops.set_min_eq_max = channel_sync_syncpt_set_min_eq_max;
+	sp->ops.set_safe_state = channel_sync_syncpt_set_safe_state;
+	sp->ops.syncpt_id = syncpt_get_id;
+	sp->ops.syncpt_address = channel_sync_syncpt_get_address;
+	sp->ops.destroy = channel_sync_syncpt_destroy;
 
 	return &sp->ops;
 }
 #endif /* CONFIG_TEGRA_GK20A_NVHOST */
 
-struct gk20a_channel_semaphore {
-	struct gk20a_channel_sync ops;
+struct nvgpu_channel_sync_semaphore {
+	struct nvgpu_channel_sync ops;
 	struct channel_gk20a *c;
 
 	/* A semaphore pool owned by this channel. */
@@ -381,47 +388,47 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
 	}
 }
 
-void gk20a_channel_gen_sema_wait_cmd(struct channel_gk20a *c,
+void channel_sync_semaphore_gen_wait_cmd(struct channel_gk20a *c,
 	struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd,
 	u32 wait_cmd_size, int pos)
 {
-	if (!sema) {
+	if (sema == NULL) {
 		/* expired */
 		nvgpu_memset(c->g, wait_cmd->mem,
-			(wait_cmd->off + pos * wait_cmd_size) * sizeof(u32),
-			0, wait_cmd_size * sizeof(u32));
+			(wait_cmd->off + (u32)pos * wait_cmd_size) * (u32)sizeof(u32),
+			0, wait_cmd_size * (u32)sizeof(u32));
 	} else {
 		WARN_ON(!sema->incremented);
 		add_sema_cmd(c->g, c, sema, wait_cmd,
-			pos * wait_cmd_size, true, false);
+			(u32)pos * wait_cmd_size, true, false);
 		nvgpu_semaphore_put(sema);
 	}
 }
 
-static int gk20a_channel_semaphore_wait_syncpt(
-	struct gk20a_channel_sync *s, u32 id,
+static int channel_sync_semaphore_wait_raw_syncpt(
+	struct nvgpu_channel_sync *s, u32 id,
 	u32 thresh, struct priv_cmd_entry *entry)
 {
-	struct gk20a_channel_semaphore *sema =
-		container_of(s, struct gk20a_channel_semaphore, ops);
+	struct nvgpu_channel_sync_semaphore *sema =
+		container_of(s, struct nvgpu_channel_sync_semaphore, ops);
 	struct gk20a *g = sema->c->g;
 	nvgpu_err(g, "trying to use syncpoint synchronization");
 	return -ENODEV;
 }
 
-static int gk20a_channel_semaphore_wait_fd(
-	struct gk20a_channel_sync *s, int fd,
+static int channel_sync_semaphore_wait_fd(
+	struct nvgpu_channel_sync *s, int fd,
 	struct priv_cmd_entry *entry, int max_wait_cmds)
 {
-	struct gk20a_channel_semaphore *sema =
-		container_of(s, struct gk20a_channel_semaphore, ops);
+	struct nvgpu_channel_sync_semaphore *sema =
+		container_of(s, struct nvgpu_channel_sync_semaphore, ops);
 	struct channel_gk20a *c = sema->c;
 
 	struct nvgpu_os_fence os_fence = {0};
 	int err;
 
 	err = nvgpu_os_fence_fdget(&os_fence, c, fd);
-	if (err) {
+	if (err != 0) {
 		return err;
 	}
 
@@ -433,22 +440,22 @@ static int gk20a_channel_semaphore_wait_fd(
 	return err;
 }
 
-static int __gk20a_channel_semaphore_incr(
-	struct gk20a_channel_sync *s, bool wfi_cmd,
+static int channel_sync_semaphore_incr_common(
+	struct nvgpu_channel_sync *s, bool wfi_cmd,
 	struct priv_cmd_entry *incr_cmd,
 	struct gk20a_fence *fence,
 	bool need_sync_fence)
 {
-	int incr_cmd_size;
-	struct gk20a_channel_semaphore *sp =
-		container_of(s, struct gk20a_channel_semaphore, ops);
+	u32 incr_cmd_size;
+	struct nvgpu_channel_sync_semaphore *sp =
+		container_of(s, struct nvgpu_channel_sync_semaphore, ops);
 	struct channel_gk20a *c = sp->c;
 	struct nvgpu_semaphore *semaphore;
 	int err = 0;
 	struct nvgpu_os_fence os_fence = {0};
 
 	semaphore = nvgpu_semaphore_alloc(c);
-	if (!semaphore) {
+	if (semaphore == NULL) {
 		nvgpu_err(c->g,
 			"ran out of semaphores");
 		return -ENOMEM;
@@ -479,8 +486,8 @@ static int __gk20a_channel_semaphore_incr(
 		&c->semaphore_wq,
 		os_fence);
 
-	if (err) {
-		if (nvgpu_os_fence_is_initialized(&os_fence)) {
+	if (err != 0) {
+		if (nvgpu_os_fence_is_initialized(&os_fence) != 0) {
 			os_fence.ops->drop_ref(&os_fence);
 		}
 		goto clean_up_sema;
@@ -493,8 +500,8 @@ clean_up_sema:
 	return err;
 }
 
-static int gk20a_channel_semaphore_incr(
-	struct gk20a_channel_sync *s,
+static int channel_sync_semaphore_incr(
+	struct nvgpu_channel_sync *s,
 	struct priv_cmd_entry *entry,
 	struct gk20a_fence *fence,
 	bool need_sync_fence,
@@ -502,13 +509,13 @@ static int gk20a_channel_semaphore_incr(
 {
 	/* Don't put wfi cmd to this one since we're not returning
 	 * a fence to user space. */
-	return __gk20a_channel_semaphore_incr(s,
+	return channel_sync_semaphore_incr_common(s,
 		false /* no wfi */,
 		entry, fence, need_sync_fence);
 }
 
-static int gk20a_channel_semaphore_incr_user(
-	struct gk20a_channel_sync *s,
+static int channel_sync_semaphore_incr_user(
+	struct nvgpu_channel_sync *s,
 	int wait_fence_fd,
 	struct priv_cmd_entry *entry,
 	struct gk20a_fence *fence,
@@ -519,29 +526,30 @@ static int gk20a_channel_semaphore_incr_user(
 #ifdef CONFIG_SYNC
 	int err;
 
-	err = __gk20a_channel_semaphore_incr(s, wfi, entry, fence,
+	err = channel_sync_semaphore_incr_common(s, wfi, entry, fence,
 		need_sync_fence);
-	if (err)
+	if (err != 0) {
 		return err;
+	}
 
 	return 0;
 #else
-	struct gk20a_channel_semaphore *sema =
-		container_of(s, struct gk20a_channel_semaphore, ops);
+	struct nvgpu_channel_sync_semaphore *sema =
+		container_of(s, struct nvgpu_channel_sync_semaphore, ops);
 	nvgpu_err(sema->c->g,
 		"trying to use sync fds with CONFIG_SYNC disabled");
 	return -ENODEV;
 #endif
 }
 
-static void gk20a_channel_semaphore_set_min_eq_max(struct gk20a_channel_sync *s)
+static void channel_sync_semaphore_set_min_eq_max(struct nvgpu_channel_sync *s)
 {
-	struct gk20a_channel_semaphore *sp =
-		container_of(s, struct gk20a_channel_semaphore, ops);
+	struct nvgpu_channel_sync_semaphore *sp =
+		container_of(s, struct nvgpu_channel_sync_semaphore, ops);
 	struct channel_gk20a *c = sp->c;
 	bool updated;
 
-	if (!c->hw_sema) {
+	if (c->hw_sema == NULL) {
 		return;
 	}
 
@@ -552,25 +560,25 @@ static void gk20a_channel_semaphore_set_min_eq_max(struct gk20a_channel_sync *s)
 	}
 }
 
-static void gk20a_channel_semaphore_set_safe_state(struct gk20a_channel_sync *s)
+static void channel_sync_semaphore_set_safe_state(struct nvgpu_channel_sync *s)
 {
 	/* Nothing to do. */
 }
 
-static int gk20a_channel_semaphore_syncpt_id(struct gk20a_channel_sync *s)
+static int channel_sync_semaphore_get_id(struct nvgpu_channel_sync *s)
 {
 	return -EINVAL;
 }
 
-static u64 gk20a_channel_semaphore_syncpt_address(struct gk20a_channel_sync *s)
+static u64 channel_sync_semaphore_get_address(struct nvgpu_channel_sync *s)
 {
 	return 0;
 }
 
-static void gk20a_channel_semaphore_destroy(struct gk20a_channel_sync *s)
+static void channel_sync_semaphore_destroy(struct nvgpu_channel_sync *s)
 {
-	struct gk20a_channel_semaphore *sema =
-		container_of(s, struct gk20a_channel_semaphore, ops);
+	struct nvgpu_channel_sync_semaphore *sema =
+		container_of(s, struct nvgpu_channel_sync_semaphore, ops);
 
 	struct channel_gk20a *c = sema->c;
 	struct gk20a *g = c->g;
@@ -586,21 +594,21 @@ static void gk20a_channel_semaphore_destroy(struct gk20a_channel_sync *s)
 	nvgpu_kfree(sema->c->g, sema);
 }
 
-static struct gk20a_channel_sync *
-gk20a_channel_semaphore_create(struct channel_gk20a *c, bool user_managed)
+static struct nvgpu_channel_sync *
+channel_sync_semaphore_create(struct channel_gk20a *c, bool user_managed)
 {
-	struct gk20a_channel_semaphore *sema;
+	struct nvgpu_channel_sync_semaphore *sema;
 	struct gk20a *g = c->g;
 	char pool_name[20];
 	int asid = -1;
 	int err;
 
-	if (WARN_ON(!c->vm)) {
+	if (WARN_ON(c->vm == NULL)) {
 		return NULL;
 	}
 
 	sema = nvgpu_kzalloc(c->g, sizeof(*sema));
-	if (!sema) {
+	if (sema == NULL) {
 		return NULL;
 	}
 	sema->c = c;
@@ -608,7 +616,7 @@ gk20a_channel_semaphore_create(struct channel_gk20a *c, bool user_managed)
 	sprintf(pool_name, "semaphore_pool-%d", c->chid);
 	sema->pool = c->vm->sema_pool;
 
-	if (c->vm->as_share) {
+	if (c->vm->as_share != NULL) {
 		asid = c->vm->as_share->id;
 	}
 
@@ -617,27 +625,27 @@ gk20a_channel_semaphore_create(struct channel_gk20a *c, bool user_managed)
 		err = g->os_channel.init_os_fence_framework(c,
 			"gk20a_ch%d_as%d", c->chid, asid);
 
-		if (err) {
+		if (err != 0) {
 			nvgpu_kfree(g, sema);
 			return NULL;
 		}
 	}
 
 	nvgpu_atomic_set(&sema->ops.refcount, 0);
-	sema->ops.wait_syncpt = gk20a_channel_semaphore_wait_syncpt;
-	sema->ops.wait_fd = gk20a_channel_semaphore_wait_fd;
-	sema->ops.incr = gk20a_channel_semaphore_incr;
-	sema->ops.incr_user = gk20a_channel_semaphore_incr_user;
-	sema->ops.set_min_eq_max = gk20a_channel_semaphore_set_min_eq_max;
-	sema->ops.set_safe_state = gk20a_channel_semaphore_set_safe_state;
-	sema->ops.syncpt_id = gk20a_channel_semaphore_syncpt_id;
-	sema->ops.syncpt_address = gk20a_channel_semaphore_syncpt_address;
-	sema->ops.destroy = gk20a_channel_semaphore_destroy;
+	sema->ops.wait_syncpt = channel_sync_semaphore_wait_raw_syncpt;
+	sema->ops.wait_fd = channel_sync_semaphore_wait_fd;
+	sema->ops.incr = channel_sync_semaphore_incr;
+	sema->ops.incr_user = channel_sync_semaphore_incr_user;
+	sema->ops.set_min_eq_max = channel_sync_semaphore_set_min_eq_max;
+	sema->ops.set_safe_state = channel_sync_semaphore_set_safe_state;
+	sema->ops.syncpt_id = channel_sync_semaphore_get_id;
+	sema->ops.syncpt_address = channel_sync_semaphore_get_address;
+	sema->ops.destroy = channel_sync_semaphore_destroy;
 
 	return &sema->ops;
 }
 
-void gk20a_channel_sync_destroy(struct gk20a_channel_sync *sync,
+void nvgpu_channel_sync_destroy(struct nvgpu_channel_sync *sync,
 	bool set_safe_state)
 {
 	if (set_safe_state) {
@@ -646,17 +654,17 @@ void gk20a_channel_sync_destroy(struct gk20a_channel_sync *sync,
 	sync->destroy(sync);
 }
 
-struct gk20a_channel_sync *gk20a_channel_sync_create(struct channel_gk20a *c,
+struct nvgpu_channel_sync *nvgpu_channel_sync_create(struct channel_gk20a *c,
 	bool user_managed)
 {
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
 	if (gk20a_platform_has_syncpoints(c->g))
-		return gk20a_channel_syncpt_create(c, user_managed);
+		return channel_sync_syncpt_create(c, user_managed);
 #endif
-	return gk20a_channel_semaphore_create(c, user_managed);
+	return channel_sync_semaphore_create(c, user_managed);
 }
 
-bool gk20a_channel_sync_needs_sync_framework(struct gk20a *g)
+bool nvgpu_channel_sync_needs_os_fence_framework(struct gk20a *g)
 {
 	return !gk20a_platform_has_syncpoints(g);
 }
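The rename does not change the file's design: each backend embeds the common ops struct and recovers its private type with container_of(), which is how channel_sync_syncpt_wait_raw() and friends get from a struct nvgpu_channel_sync * back to their struct nvgpu_channel_sync_syncpt. A self-contained illustration of that pattern, with hypothetical names and independent of the nvgpu headers:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Common interface, analogous to struct nvgpu_channel_sync. */
    struct sync_ops {
        int (*syncpt_id)(struct sync_ops *s);
    };

    /* Backend-private type, analogous to struct nvgpu_channel_sync_syncpt. */
    struct syncpt_backend {
        struct sync_ops ops;  /* embedded common part */
        int id;               /* backend-private state */
    };

    static int syncpt_backend_id(struct sync_ops *s)
    {
        /* Recover the enclosing backend object from the ops pointer. */
        struct syncpt_backend *sp =
            container_of(s, struct syncpt_backend, ops);
        return sp->id;
    }

    int main(void)
    {
        struct syncpt_backend sp = {
            .ops = { .syncpt_id = syncpt_backend_id },
            .id = 7,
        };
        struct sync_ops *s = &sp.ops;  /* callers only ever see this */

        printf("id = %d\n", s->syncpt_id(s));  /* prints "id = 7" */
        return 0;
    }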
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index df16af85..74fc991d 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -38,11 +38,11 @@
 #include <nvgpu/clk_arb.h>
 #include <nvgpu/therm.h>
 #include <nvgpu/mc.h>
+#include <nvgpu/channel_sync.h>
 
 #include <trace/events/gk20a.h>
 
 #include "gk20a.h"
-#include "channel_sync_gk20a.h"
 
 #include "dbg_gpu_gk20a.h"
 #include "hal.h"
@@ -475,7 +475,7 @@ int gk20a_init_gpu_characteristics(struct gk20a *g)
 	 * supported otherwise, provided that the user doesn't request anything
 	 * that depends on deferred cleanup.
 	 */
-	if (!gk20a_channel_sync_needs_sync_framework(g)) {
+	if (!nvgpu_channel_sync_needs_os_fence_framework(g)) {
 		__nvgpu_set_enabled(g,
 			NVGPU_SUPPORT_DETERMINISTIC_SUBMIT_FULL,
 			true);
diff --git a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
index 21fed4fc..f1b318c9 100644
--- a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
@@ -30,11 +30,11 @@
 #include <nvgpu/utils.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/channel.h>
+#include <nvgpu/channel_sync.h>
 
 #include "fifo_gp10b.h"
 
 #include "gm20b/fifo_gm20b.h"
-#include "gk20a/channel_sync_gk20a.h"
 
 #include <nvgpu/hw/gp10b/hw_pbdma_gp10b.h>
 #include <nvgpu/hw/gp10b/hw_ccsr_gp10b.h>
diff --git a/drivers/gpu/nvgpu/include/nvgpu/channel.h b/drivers/gpu/nvgpu/include/nvgpu/channel.h
index 6cca843e..cd4fadf8 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/channel.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/channel.h
@@ -35,7 +35,7 @@ struct gk20a;
 struct dbg_session_gk20a;
 struct gk20a_fence;
 struct fifo_profile_gk20a;
-struct gk20a_channel_sync;
+struct nvgpu_channel_sync;
 struct nvgpu_gpfifo_userdata;
 
 /* Flags to be passed to gk20a_channel_alloc_gpfifo() */
@@ -289,8 +289,8 @@ struct channel_gk20a {
 	struct nvgpu_list_node dbg_s_list;
 
 	struct nvgpu_mutex sync_lock;
-	struct gk20a_channel_sync *sync;
-	struct gk20a_channel_sync *user_sync;
+	struct nvgpu_channel_sync *sync;
+	struct nvgpu_channel_sync *user_sync;
 
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
 	u64 virt_ctx;
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h b/drivers/gpu/nvgpu/include/nvgpu/channel_sync.h
index e0e318d2..b5936edc 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/channel_sync.h
@@ -1,7 +1,6 @@
 /*
- * drivers/video/tegra/host/gk20a/channel_sync_gk20a.h
  *
- * GK20A Channel Synchronization Abstraction
+ * Nvgpu Channel Synchronization Abstraction
  *
  * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
  *
@@ -24,29 +23,31 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-#ifndef NVGPU_GK20A_CHANNEL_SYNC_GK20A_H
-#define NVGPU_GK20A_CHANNEL_SYNC_GK20A_H
+#ifndef NVGPU_CHANNEL_SYNC_H
+#define NVGPU_CHANNEL_SYNC_H
 
-struct gk20a_channel_sync;
+#include <nvgpu/atomic.h>
+
+struct nvgpu_channel_sync;
 struct priv_cmd_entry;
 struct channel_gk20a;
 struct gk20a_fence;
 struct gk20a;
 struct nvgpu_semaphore;
 
-struct gk20a_channel_sync {
+struct nvgpu_channel_sync {
 	nvgpu_atomic_t refcount;
 
 	/* Generate a gpu wait cmdbuf from syncpoint.
 	 * Returns a gpu cmdbuf that performs the wait when executed
 	 */
-	int (*wait_syncpt)(struct gk20a_channel_sync *s, u32 id, u32 thresh,
+	int (*wait_syncpt)(struct nvgpu_channel_sync *s, u32 id, u32 thresh,
 		struct priv_cmd_entry *entry);
 
 	/* Generate a gpu wait cmdbuf from sync fd.
 	 * Returns a gpu cmdbuf that performs the wait when executed
 	 */
-	int (*wait_fd)(struct gk20a_channel_sync *s, int fd,
+	int (*wait_fd)(struct nvgpu_channel_sync *s, int fd,
 		struct priv_cmd_entry *entry, int max_wait_cmds);
 
 	/* Increment syncpoint/semaphore.
@@ -54,7 +55,7 @@ struct gk20a_channel_sync {
 	 * - a gpu cmdbuf that performs the increment when executed,
 	 * - a fence that can be passed to wait_cpu() and is_expired().
 	 */
-	int (*incr)(struct gk20a_channel_sync *s,
+	int (*incr)(struct nvgpu_channel_sync *s,
 		struct priv_cmd_entry *entry,
 		struct gk20a_fence *fence,
 		bool need_sync_fence,
@@ -67,7 +68,7 @@ struct gk20a_channel_sync {
 	 * - a fence that can be passed to wait_cpu() and is_expired(),
 	 * - a gk20a_fence that signals when the incr has happened.
 	 */
-	int (*incr_user)(struct gk20a_channel_sync *s,
+	int (*incr_user)(struct nvgpu_channel_sync *s,
 		int wait_fence_fd,
 		struct priv_cmd_entry *entry,
 		struct gk20a_fence *fence,
@@ -76,37 +77,37 @@ struct gk20a_channel_sync {
 		bool register_irq);
 
 	/* Reset the channel syncpoint/semaphore. */
-	void (*set_min_eq_max)(struct gk20a_channel_sync *s);
+	void (*set_min_eq_max)(struct nvgpu_channel_sync *s);
 
 	/*
 	 * Set the channel syncpoint/semaphore to safe state
 	 * This should be used to reset User managed syncpoint since we don't
 	 * track threshold values for those syncpoints
 	 */
-	void (*set_safe_state)(struct gk20a_channel_sync *s);
+	void (*set_safe_state)(struct nvgpu_channel_sync *s);
 
 	/* Returns the sync point id or negative number if no syncpt*/
-	int (*syncpt_id)(struct gk20a_channel_sync *s);
+	int (*syncpt_id)(struct nvgpu_channel_sync *s);
 
 	/* Returns the sync point address of sync point or 0 if not supported */
-	u64 (*syncpt_address)(struct gk20a_channel_sync *s);
+	u64 (*syncpt_address)(struct nvgpu_channel_sync *s);
 
-	/* Free the resources allocated by gk20a_channel_sync_create. */
-	void (*destroy)(struct gk20a_channel_sync *s);
+	/* Free the resources allocated by nvgpu_channel_sync_create. */
+	void (*destroy)(struct nvgpu_channel_sync *s);
 };
 
-void gk20a_channel_gen_sema_wait_cmd(struct channel_gk20a *c,
+void channel_sync_semaphore_gen_wait_cmd(struct channel_gk20a *c,
 	struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd,
 	u32 wait_cmd_size, int pos);
 
-int gk20a_channel_gen_syncpt_wait_cmd(struct channel_gk20a *c,
+int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c,
 	u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd,
 	u32 wait_cmd_size, int pos, bool preallocated);
 
-void gk20a_channel_sync_destroy(struct gk20a_channel_sync *sync,
+void nvgpu_channel_sync_destroy(struct nvgpu_channel_sync *sync,
 	bool set_safe_state);
-struct gk20a_channel_sync *gk20a_channel_sync_create(struct channel_gk20a *c,
+struct nvgpu_channel_sync *nvgpu_channel_sync_create(struct channel_gk20a *c,
 	bool user_managed);
-bool gk20a_channel_sync_needs_sync_framework(struct gk20a *g);
+bool nvgpu_channel_sync_needs_os_fence_framework(struct gk20a *g);
 
 #endif /* NVGPU_GK20A_CHANNEL_SYNC_GK20A_H */
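As a usage sketch of the interface declared above: a submit path would emit a pre-fence wait and a completion increment through the ops table roughly like this (caller-side sketch; allocation of the entries and the fence is elided, and the flag values are illustrative):

    /* Sketch: one wait + one increment through struct nvgpu_channel_sync. */
    static int example_emit_sync_cmds(struct nvgpu_channel_sync *s,
        int pre_fence_fd,
        struct priv_cmd_entry *wait_entry,
        struct priv_cmd_entry *incr_entry,
        struct gk20a_fence *post_fence)
    {
        int err;

        /* Turn the incoming sync fd into a GPU wait command buffer. */
        err = s->wait_fd(s, pre_fence_fd, wait_entry, 1 /* max_wait_cmds */);
        if (err != 0) {
            return err;
        }

        /* Emit the job-completion increment and get a post fence back. */
        return s->incr(s, incr_entry, post_fence,
            false /* need_sync_fence */, true /* register_irq */);
    }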
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_channel.c b/drivers/gpu/nvgpu/os/linux/ioctl_channel.c
index e9a24923..0f6843df 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_channel.c
@@ -35,11 +35,11 @@
 #include <nvgpu/nvhost.h>
 #include <nvgpu/os_sched.h>
 #include <nvgpu/channel.h>
+#include <nvgpu/channel_sync.h>
 
 #include "gk20a/gk20a.h"
 #include "gk20a/dbg_gpu_gk20a.h"
 #include "gk20a/fence_gk20a.h"
-#include "gk20a/channel_sync_gk20a.h"
 
 #include "platform_gk20a.h"
 #include "ioctl_channel.h"
@@ -1028,7 +1028,7 @@ static int nvgpu_ioctl_channel_get_user_syncpoint(struct channel_gk20a *ch,
 	if (ch->user_sync) {
 		nvgpu_mutex_release(&ch->sync_lock);
 	} else {
-		ch->user_sync = gk20a_channel_sync_create(ch, true);
+		ch->user_sync = nvgpu_channel_sync_create(ch, true);
 		if (!ch->user_sync) {
 			nvgpu_mutex_release(&ch->sync_lock);
 			return -ENOMEM;
diff --git a/drivers/gpu/nvgpu/os/linux/os_fence_android_sema.c b/drivers/gpu/nvgpu/os/linux/os_fence_android_sema.c
index ec3ccf0d..195da64a 100644
--- a/drivers/gpu/nvgpu/os/linux/os_fence_android_sema.c
+++ b/drivers/gpu/nvgpu/os/linux/os_fence_android_sema.c
@@ -21,8 +21,8 @@
 #include <nvgpu/linux/os_fence_android.h>
 #include <nvgpu/semaphore.h>
 #include <nvgpu/channel.h>
+#include <nvgpu/channel_sync.h>
 
-#include "gk20a/channel_sync_gk20a.h"
 #include "gk20a/mm_gk20a.h"
 
 #include "sync_sema_android.h"
@@ -63,7 +63,7 @@ int nvgpu_os_fence_sema_wait_gen_cmd(struct nvgpu_os_fence *s,
 			sync_fence->cbs[i].sync_pt);
 
 		sema = gk20a_sync_pt_sema(pt);
-		gk20a_channel_gen_sema_wait_cmd(c, sema, wait_cmd,
+		channel_sync_semaphore_gen_wait_cmd(c, sema, wait_cmd,
 			wait_cmd_size, i);
 	}
 
diff --git a/drivers/gpu/nvgpu/os/linux/os_fence_android_syncpt.c b/drivers/gpu/nvgpu/os/linux/os_fence_android_syncpt.c
index b15dba19..fe09db8f 100644
--- a/drivers/gpu/nvgpu/os/linux/os_fence_android_syncpt.c
+++ b/drivers/gpu/nvgpu/os/linux/os_fence_android_syncpt.c
@@ -23,9 +23,9 @@
 #include <nvgpu/nvhost.h>
 #include <nvgpu/atomic.h>
 #include <nvgpu/channel.h>
+#include <nvgpu/channel_sync.h>
 
 #include "gk20a/gk20a.h"
-#include "gk20a/channel_sync_gk20a.h"
 #include "gk20a/mm_gk20a.h"
 
 #include "../drivers/staging/android/sync.h"
@@ -76,7 +76,7 @@ int nvgpu_os_fence_syncpt_wait_gen_cmd(struct nvgpu_os_fence *s,
 		u32 wait_id = nvgpu_nvhost_sync_pt_id(pt);
 		u32 wait_value = nvgpu_nvhost_sync_pt_thresh(pt);
 
-		err = gk20a_channel_gen_syncpt_wait_cmd(c, wait_id, wait_value,
+		err = channel_sync_syncpt_gen_wait_cmd(c, wait_id, wait_value,
 			wait_cmd, wait_cmd_size, i, true);
 	}
 
82 82