summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu
diff options
context:
space:
mode:
author: seshendra Gadagottu <sgadagottu@nvidia.com> 2017-03-17 14:30:57 -0400
committer: mobile promotions <svcmobile_promotions@nvidia.com> 2017-05-26 17:07:12 -0400
commit: c0822cb22e13204e06b145ae950a33d45e95918e (patch)
tree: 1623fcd5db0d24190e6d93287742c8d2fee14794 /drivers/gpu/nvgpu
parent: e21e6e947b334cd16cc92c41953bf3c0153b1508 (diff)
gpu: nvgpu: add chip specific sync point support
Added support for chip specific sync point implementation. Relevant fifo hal functions are added and updated for legacy chips.

JIRA GPUT19X-2

Change-Id: I9a9c36d71e15c384b5e5af460cd52012f94e0b04
Signed-off-by: seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: http://git-master/r/1258232
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--  drivers/gpu/nvgpu/Makefile.nvgpu             |  1
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c | 90
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c         | 83
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.h         | 16
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h              | 21
-rw-r--r--  drivers/gpu/nvgpu/gm20b/fifo_gm20b.c         | 10
6 files changed, 166 insertions(+), 55 deletions(-)
diff --git a/drivers/gpu/nvgpu/Makefile.nvgpu b/drivers/gpu/nvgpu/Makefile.nvgpu
index 109d1e4d..e21a9426 100644
--- a/drivers/gpu/nvgpu/Makefile.nvgpu
+++ b/drivers/gpu/nvgpu/Makefile.nvgpu
@@ -18,6 +18,7 @@ ccflags-y += -I$(srctree)/../nvgpu-t19x/drivers/gpu/nvgpu
18ccflags-y += -I$(srctree)/../nvgpu-t19x/drivers/gpu/nvgpu/include 18ccflags-y += -I$(srctree)/../nvgpu-t19x/drivers/gpu/nvgpu/include
19ccflags-y += -I$(srctree)/../nvgpu-t19x/include 19ccflags-y += -I$(srctree)/../nvgpu-t19x/include
20ccflags-y += -I$(srctree)/../nvgpu-t19x/include/uapi 20ccflags-y += -I$(srctree)/../nvgpu-t19x/include/uapi
21ccflags-y += -I$(srctree)/../nvhost-t19x/include
21endif 22endif
22 23
23obj-$(CONFIG_GK20A) := nvgpu.o 24obj-$(CONFIG_GK20A) := nvgpu.o
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index eac7dbc3..07157dc9 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -42,22 +42,9 @@ struct gk20a_channel_syncpt {
42 struct channel_gk20a *c; 42 struct channel_gk20a *c;
43 struct platform_device *host1x_pdev; 43 struct platform_device *host1x_pdev;
44 u32 id; 44 u32 id;
45 struct nvgpu_mem syncpt_buf;
45}; 46};
46 47
47static void add_wait_cmd(struct gk20a *g, struct priv_cmd_entry *cmd, u32 off,
48 u32 id, u32 thresh)
49{
50 off = cmd->off + off;
51 /* syncpoint_a */
52 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001C);
53 /* payload */
54 nvgpu_mem_wr32(g, cmd->mem, off++, thresh);
55 /* syncpoint_b */
56 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D);
57 /* syncpt_id, switch_en, wait */
58 nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x10);
59}
60
61static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s, 48static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
62 u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd, 49 u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd,
63 struct gk20a_fence *fence) 50 struct gk20a_fence *fence)
@@ -75,14 +62,18 @@ static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
75 if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev, id, thresh)) 62 if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev, id, thresh))
76 return 0; 63 return 0;
77 64
78 err = gk20a_channel_alloc_priv_cmdbuf(c, 4, wait_cmd); 65 err = gk20a_channel_alloc_priv_cmdbuf(c,
66 c->g->ops.fifo.get_syncpt_wait_cmd_size(), wait_cmd);
79 if (err) { 67 if (err) {
80 nvgpu_err(c->g, 68 nvgpu_err(c->g,
81 "not enough priv cmd buffer space"); 69 "not enough priv cmd buffer space");
82 return err; 70 return err;
83 } 71 }
84 72
85 add_wait_cmd(c->g, wait_cmd, 0, id, thresh); 73 nvgpu_log(c->g, gpu_dbg_info, "sp->id %d gpu va %llx",
74 id, sp->syncpt_buf.gpu_va);
75 c->g->ops.fifo.add_syncpt_wait_cmd(c->g, wait_cmd, 0, id,
76 thresh, sp->syncpt_buf.gpu_va);
86 77
87 return 0; 78 return 0;
88} 79}
@@ -101,6 +92,7 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
101 struct channel_gk20a *c = sp->c; 92 struct channel_gk20a *c = sp->c;
102 u32 wait_id; 93 u32 wait_id;
103 int err = 0; 94 int err = 0;
95 u32 wait_cmd_size = 0;
104 96
105 sync_fence = nvhost_sync_fdget(fd); 97 sync_fence = nvhost_sync_fdget(fd);
106 if (!sync_fence) 98 if (!sync_fence)
@@ -130,8 +122,10 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
130 sync_fence_put(sync_fence); 122 sync_fence_put(sync_fence);
131 return 0; 123 return 0;
132 } 124 }
133 125 wait_cmd_size = c->g->ops.fifo.get_syncpt_wait_cmd_size();
134 err = gk20a_channel_alloc_priv_cmdbuf(c, 4 * num_wait_cmds, wait_cmd); 126 err = gk20a_channel_alloc_priv_cmdbuf(c,
127 wait_cmd_size * num_wait_cmds,
128 wait_cmd);
135 if (err) { 129 if (err) {
136 nvgpu_err(c->g, 130 nvgpu_err(c->g,
137 "not enough priv cmd buffer space"); 131 "not enough priv cmd buffer space");
@@ -152,13 +146,16 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
152 146
153 if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev, 147 if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev,
154 wait_id, wait_value)) { 148 wait_id, wait_value)) {
155 /* each wait_cmd is 4 u32s */
156 nvgpu_memset(c->g, wait_cmd->mem, 149 nvgpu_memset(c->g, wait_cmd->mem,
157 (wait_cmd->off + i * 4) * sizeof(u32), 150 (wait_cmd->off + i * wait_cmd_size) * sizeof(u32),
158 0, 4 * sizeof(u32)); 151 0, wait_cmd_size * sizeof(u32));
159 } else 152 } else {
160 add_wait_cmd(c->g, wait_cmd, i * 4, wait_id, 153 nvgpu_log(c->g, gpu_dbg_info, "sp->id %d gpu va %llx",
161 wait_value); 154 wait_id, sp->syncpt_buf.gpu_va);
155 c->g->ops.fifo.add_syncpt_wait_cmd(c->g, wait_cmd,
156 i * wait_cmd_size, wait_id, wait_value,
157 sp->syncpt_buf.gpu_va);
158 }
162#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) 159#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
163 i++; 160 i++;
164 } 161 }
@@ -193,45 +190,21 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
193 bool need_sync_fence) 190 bool need_sync_fence)
194{ 191{
195 u32 thresh; 192 u32 thresh;
196 size_t incr_cmd_size;
197 int off;
198 int err; 193 int err;
199 struct gk20a_channel_syncpt *sp = 194 struct gk20a_channel_syncpt *sp =
200 container_of(s, struct gk20a_channel_syncpt, ops); 195 container_of(s, struct gk20a_channel_syncpt, ops);
201 struct channel_gk20a *c = sp->c; 196 struct channel_gk20a *c = sp->c;
202 197
203 incr_cmd_size = 6; 198 err = gk20a_channel_alloc_priv_cmdbuf(c,
204 if (wfi_cmd) 199 c->g->ops.fifo.get_syncpt_incr_cmd_size(wfi_cmd),
205 incr_cmd_size += 2; 200 incr_cmd);
206
207 err = gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, incr_cmd);
208 if (err) 201 if (err)
209 return err; 202 return err;
210 203
211 off = incr_cmd->off; 204 nvgpu_log(c->g, gpu_dbg_info, "sp->id %d gpu va %llx",
212 205 sp->id, sp->syncpt_buf.gpu_va);
213 /* WAR for hw bug 1491360: syncpt needs to be incremented twice */ 206 c->g->ops.fifo.add_syncpt_incr_cmd(c->g, wfi_cmd,
214 207 incr_cmd, sp->id, sp->syncpt_buf.gpu_va);
215 if (wfi_cmd) {
216 /* wfi */
217 nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001E);
218 /* handle, ignored */
219 nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x00000000);
220 }
221 /* syncpoint_a */
222 nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001C);
223 /* payload, ignored */
224 nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0);
225 /* syncpoint_b */
226 nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001D);
227 /* syncpt_id, incr */
228 nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, (sp->id << 8) | 0x1);
229 /* syncpoint_b */
230 nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001D);
231 /* syncpt_id, incr */
232 nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, (sp->id << 8) | 0x1);
233
234 WARN_ON(off - incr_cmd->off != incr_cmd_size);
235 208
236 thresh = nvhost_syncpt_incr_max_ext(sp->host1x_pdev, sp->id, 2); 209 thresh = nvhost_syncpt_incr_max_ext(sp->host1x_pdev, sp->id, 2);
237 210
@@ -337,6 +310,10 @@ static void gk20a_channel_syncpt_destroy(struct gk20a_channel_sync *s)
337{ 310{
338 struct gk20a_channel_syncpt *sp = 311 struct gk20a_channel_syncpt *sp =
339 container_of(s, struct gk20a_channel_syncpt, ops); 312 container_of(s, struct gk20a_channel_syncpt, ops);
313
314
315 sp->c->g->ops.fifo.free_syncpt_buf(sp->c, &sp->syncpt_buf);
316
340 nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id); 317 nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
341 nvhost_syncpt_put_ref_ext(sp->host1x_pdev, sp->id); 318 nvhost_syncpt_put_ref_ext(sp->host1x_pdev, sp->id);
342 nvgpu_kfree(sp->c->g, sp); 319 nvgpu_kfree(sp->c->g, sp);
@@ -366,6 +343,9 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c)
366 return NULL; 343 return NULL;
367 } 344 }
368 345
346 sp->c->g->ops.fifo.alloc_syncpt_buf(sp->c, sp->id,
347 &sp->syncpt_buf);
348
369 nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id); 349 nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
370 350
371 atomic_set(&sp->ops.refcount, 0); 351 atomic_set(&sp->ops.refcount, 0);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 05c13374..b8ff84df 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -4265,6 +4265,79 @@ u32 gk20a_fifo_pbdma_acquire_val(u64 timeout)
4265 return val; 4265 return val;
4266} 4266}
4267 4267
4268#ifdef CONFIG_TEGRA_GK20A_NVHOST
4269void gk20a_fifo_add_syncpt_wait_cmd(struct gk20a *g,
4270 struct priv_cmd_entry *cmd, u32 off,
4271 u32 id, u32 thresh, u64 gpu_va)
4272{
4273 gk20a_dbg_fn("");
4274
4275 off = cmd->off + off;
4276 /* syncpoint_a */
4277 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001C);
4278 /* payload */
4279 nvgpu_mem_wr32(g, cmd->mem, off++, thresh);
4280 /* syncpoint_b */
4281 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D);
4282 /* syncpt_id, switch_en, wait */
4283 nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x10);
4284}
4285
4286u32 gk20a_fifo_get_syncpt_wait_cmd_size(void)
4287{
4288 return 4;
4289}
4290
4291void gk20a_fifo_add_syncpt_incr_cmd(struct gk20a *g,
4292 bool wfi_cmd, struct priv_cmd_entry *cmd,
4293 u32 id, u64 gpu_va)
4294{
4295 u32 off = cmd->off;
4296
4297 gk20a_dbg_fn("");
4298 if (wfi_cmd) {
4299 /* wfi */
4300 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001E);
4301 /* handle, ignored */
4302 nvgpu_mem_wr32(g, cmd->mem, off++, 0x00000000);
4303 }
4304 /* syncpoint_a */
4305 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001C);
4306 /* payload, ignored */
4307 nvgpu_mem_wr32(g, cmd->mem, off++, 0);
4308 /* syncpoint_b */
4309 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D);
4310 /* syncpt_id, incr */
4311 nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x1);
4312 /* syncpoint_b */
4313 nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D);
4314 /* syncpt_id, incr */
4315 nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x1);
4316
4317}
4318
4319u32 gk20a_fifo_get_syncpt_incr_cmd_size(bool wfi_cmd)
4320{
4321 if (wfi_cmd)
4322 return 8;
4323 else
4324 return 6;
4325}
4326
4327void gk20a_fifo_free_syncpt_buf(struct channel_gk20a *c,
4328 struct nvgpu_mem *syncpt_buf)
4329{
4330
4331}
4332
4333int gk20a_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
4334 u32 syncpt_id, struct nvgpu_mem *syncpt_buf)
4335{
4336 return 0;
4337}
4338#endif
4339
4340
4268void gk20a_init_fifo(struct gpu_ops *gops) 4341void gk20a_init_fifo(struct gpu_ops *gops)
4269{ 4342{
4270 gops->fifo.disable_channel = gk20a_fifo_disable_channel; 4343 gops->fifo.disable_channel = gk20a_fifo_disable_channel;
@@ -4312,4 +4385,14 @@ void gk20a_init_fifo(struct gpu_ops *gops)
4312 gops->fifo.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg; 4385 gops->fifo.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg;
4313 gops->fifo.handle_sched_error = gk20a_fifo_handle_sched_error; 4386 gops->fifo.handle_sched_error = gk20a_fifo_handle_sched_error;
4314 gops->fifo.handle_pbdma_intr_0 = gk20a_fifo_handle_pbdma_intr_0; 4387 gops->fifo.handle_pbdma_intr_0 = gk20a_fifo_handle_pbdma_intr_0;
4388#ifdef CONFIG_TEGRA_GK20A_NVHOST
4389 gops->fifo.alloc_syncpt_buf = gk20a_fifo_alloc_syncpt_buf;
4390 gops->fifo.free_syncpt_buf = gk20a_fifo_free_syncpt_buf;
4391 gops->fifo.add_syncpt_wait_cmd = gk20a_fifo_add_syncpt_wait_cmd;
4392 gops->fifo.get_syncpt_wait_cmd_size =
4393 gk20a_fifo_get_syncpt_wait_cmd_size;
4394 gops->fifo.add_syncpt_incr_cmd = gk20a_fifo_add_syncpt_incr_cmd;
4395 gops->fifo.get_syncpt_incr_cmd_size =
4396 gk20a_fifo_get_syncpt_incr_cmd_size;
4397#endif
4315} 4398}
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 7351478a..80f1853c 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -405,4 +405,20 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
405 u32 pbdma_intr_0, u32 *handled, u32 *error_notifier); 405 u32 pbdma_intr_0, u32 *handled, u32 *error_notifier);
406 406
407u32 gk20a_fifo_default_timeslice_us(struct gk20a *g); 407u32 gk20a_fifo_default_timeslice_us(struct gk20a *g);
408
409#ifdef CONFIG_TEGRA_GK20A_NVHOST
410void gk20a_fifo_add_syncpt_wait_cmd(struct gk20a *g,
411 struct priv_cmd_entry *cmd, u32 off,
412 u32 id, u32 thresh, u64 gpu_va);
413u32 gk20a_fifo_get_syncpt_wait_cmd_size(void);
414void gk20a_fifo_add_syncpt_incr_cmd(struct gk20a *g,
415 bool wfi_cmd, struct priv_cmd_entry *cmd,
416 u32 id, u64 gpu_va);
417u32 gk20a_fifo_get_syncpt_incr_cmd_size(bool wfi_cmd);
418void gk20a_fifo_free_syncpt_buf(struct channel_gk20a *c,
419 struct nvgpu_mem *syncpt_buf);
420int gk20a_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
421 u32 syncpt_id, struct nvgpu_mem *syncpt_buf);
422#endif
423
408#endif /*__GR_GK20A_H__*/ 424#endif /*__GR_GK20A_H__*/
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 29ac4763..f246b857 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -499,6 +499,20 @@ struct gpu_ops {
499 unsigned int (*handle_pbdma_intr_0)(struct gk20a *g, 499 unsigned int (*handle_pbdma_intr_0)(struct gk20a *g,
500 u32 pbdma_id, u32 pbdma_intr_0, 500 u32 pbdma_id, u32 pbdma_intr_0,
501 u32 *handled, u32 *error_notifier); 501 u32 *handled, u32 *error_notifier);
502#ifdef CONFIG_TEGRA_GK20A_NVHOST
503 int (*alloc_syncpt_buf)(struct channel_gk20a *c,
504 u32 syncpt_id, struct nvgpu_mem *syncpt_buf);
505 void (*free_syncpt_buf)(struct channel_gk20a *c,
506 struct nvgpu_mem *syncpt_buf);
507 void (*add_syncpt_wait_cmd)(struct gk20a *g,
508 struct priv_cmd_entry *cmd, u32 off,
509 u32 id, u32 thresh, u64 gpu_va);
510 u32 (*get_syncpt_wait_cmd_size)(void);
511 void (*add_syncpt_incr_cmd)(struct gk20a *g,
512 bool wfi_cmd, struct priv_cmd_entry *cmd,
513 u32 id, u64 gpu_va);
514 u32 (*get_syncpt_incr_cmd_size)(bool wfi_cmd);
515#endif
502 } fifo; 516 } fifo;
503 struct pmu_v { 517 struct pmu_v {
504 /*used for change of enum zbc update cmd id from ver 0 to ver1*/ 518 /*used for change of enum zbc update cmd id from ver 0 to ver1*/
@@ -1243,6 +1257,13 @@ struct gk20a {
1243 /* Some boards might be missing power sensor, preventing 1257 /* Some boards might be missing power sensor, preventing
1244 * from monitoring power, current and voltage */ 1258 * from monitoring power, current and voltage */
1245 bool power_sensor_missing; 1259 bool power_sensor_missing;
1260
1261#if defined(CONFIG_TEGRA_GK20A_NVHOST) && defined(CONFIG_TEGRA_19x_GPU)
1262 phys_addr_t syncpt_unit_base;
1263 size_t syncpt_unit_size;
1264 u32 syncpt_size;
1265#endif
1266
1246}; 1267};
1247 1268
1248static inline unsigned long gk20a_get_gr_idle_timeout(struct gk20a *g) 1269static inline unsigned long gk20a_get_gr_idle_timeout(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
index 0931d226..6fb5802b 100644
--- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
@@ -230,4 +230,14 @@ void gm20b_init_fifo(struct gpu_ops *gops)
230 gops->fifo.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg; 230 gops->fifo.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg;
231 gops->fifo.handle_sched_error = gk20a_fifo_handle_sched_error; 231 gops->fifo.handle_sched_error = gk20a_fifo_handle_sched_error;
232 gops->fifo.handle_pbdma_intr_0 = gk20a_fifo_handle_pbdma_intr_0; 232 gops->fifo.handle_pbdma_intr_0 = gk20a_fifo_handle_pbdma_intr_0;
233#ifdef CONFIG_TEGRA_GK20A_NVHOST
234 gops->fifo.alloc_syncpt_buf = gk20a_fifo_alloc_syncpt_buf;
235 gops->fifo.free_syncpt_buf = gk20a_fifo_free_syncpt_buf;
236 gops->fifo.add_syncpt_wait_cmd = gk20a_fifo_add_syncpt_wait_cmd;
237 gops->fifo.get_syncpt_wait_cmd_size =
238 gk20a_fifo_get_syncpt_wait_cmd_size;
239 gops->fifo.add_syncpt_incr_cmd = gk20a_fifo_add_syncpt_incr_cmd;
240 gops->fifo.get_syncpt_incr_cmd_size =
241 gk20a_fifo_get_syncpt_incr_cmd_size;
242#endif
233} 243}