Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c  378
1 file changed, 11 insertions(+), 367 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 88495bde..2facb595 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -38,11 +38,7 @@
 #include "dbg_gpu_gk20a.h"
 #include "fence_gk20a.h"
 
-#include <nvgpu/hw/gk20a/hw_ram_gk20a.h>
-#include <nvgpu/hw/gk20a/hw_fifo_gk20a.h>
 #include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
-#include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h>
-#include <nvgpu/hw/gk20a/hw_ltc_gk20a.h>
 
 #define NVMAP_HANDLE_PARAM_SIZE 1
 
@@ -78,11 +74,6 @@ static void channel_gk20a_joblist_delete(struct channel_gk20a *c,
 static struct channel_gk20a_job *channel_gk20a_joblist_peek(
 		struct channel_gk20a *c);
 
-static int channel_gk20a_commit_userd(struct channel_gk20a *c);
-static int channel_gk20a_setup_userd(struct channel_gk20a *c);
-
-static void channel_gk20a_bind(struct channel_gk20a *ch_gk20a);
-
 static int channel_gk20a_update_runlist(struct channel_gk20a *c,
 				bool add);
 static void gk20a_free_error_notifiers(struct channel_gk20a *ch);
@@ -159,34 +150,6 @@ int channel_gk20a_commit_va(struct channel_gk20a *c)
 	return 0;
 }
 
-static int channel_gk20a_commit_userd(struct channel_gk20a *c)
-{
-	u32 addr_lo;
-	u32 addr_hi;
-	struct gk20a *g = c->g;
-
-	gk20a_dbg_fn("");
-
-	addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v());
-	addr_hi = u64_hi32(c->userd_iova);
-
-	gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
-		c->hw_chid, (u64)c->userd_iova);
-
-	gk20a_mem_wr32(g, &c->inst_block,
-		ram_in_ramfc_w() + ram_fc_userd_w(),
-		gk20a_aperture_mask(g, &g->fifo.userd,
-			pbdma_userd_target_sys_mem_ncoh_f(),
-			pbdma_userd_target_vid_mem_f()) |
-		pbdma_userd_addr_f(addr_lo));
-
-	gk20a_mem_wr32(g, &c->inst_block,
-		ram_in_ramfc_w() + ram_fc_userd_hi_w(),
-		pbdma_userd_hi_addr_f(addr_hi));
-
-	return 0;
-}
-
 int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
 		int timeslice_period,
 		int *__timeslice_timeout, int *__timeslice_scale)
@@ -215,255 +178,11 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
 	return 0;
 }
 
-static int channel_gk20a_set_schedule_params(struct channel_gk20a *c)
-{
-	int shift = 0, value = 0;
-
-	gk20a_channel_get_timescale_from_timeslice(c->g,
-		c->timeslice_us, &value, &shift);
-
-	/* disable channel */
-	c->g->ops.fifo.disable_channel(c);
-
-	/* preempt the channel */
-	WARN_ON(c->g->ops.fifo.preempt_channel(c->g, c->hw_chid));
-
-	/* set new timeslice */
-	gk20a_mem_wr32(c->g, &c->inst_block, ram_fc_runlist_timeslice_w(),
-		value | (shift << 12) |
-		fifo_runlist_timeslice_enable_true_f());
-
-	/* enable channel */
-	gk20a_writel(c->g, ccsr_channel_r(c->hw_chid),
-		gk20a_readl(c->g, ccsr_channel_r(c->hw_chid)) |
-		ccsr_channel_enable_set_true_f());
-
-	return 0;
-}
-
-u32 channel_gk20a_pbdma_acquire_val(struct channel_gk20a *c)
-{
-	u32 val, exp, man;
-	u64 timeout;
-	unsigned int val_len;
-
-	val = pbdma_acquire_retry_man_2_f() |
-		pbdma_acquire_retry_exp_2_f();
-
-	if (!c->g->timeouts_enabled || !c->wdt_enabled)
-		return val;
-
-	timeout = gk20a_get_channel_watchdog_timeout(c);
-	timeout *= 80UL;
-	do_div(timeout, 100); /* set acquire timeout to 80% of channel wdt */
-	timeout *= 1000000UL; /* ms -> ns */
-	do_div(timeout, 1024); /* in unit of 1024ns */
-	val_len = fls(timeout >> 32) + 32;
-	if (val_len == 32)
-		val_len = fls(timeout);
-	if (val_len > 16U + pbdma_acquire_timeout_exp_max_v()) { /* man: 16bits */
-		exp = pbdma_acquire_timeout_exp_max_v();
-		man = pbdma_acquire_timeout_man_max_v();
-	} else if (val_len > 16) {
-		exp = val_len - 16;
-		man = timeout >> exp;
-	} else {
-		exp = 0;
-		man = timeout;
-	}
-
-	val |= pbdma_acquire_timeout_exp_f(exp) |
-		pbdma_acquire_timeout_man_f(man) |
-		pbdma_acquire_timeout_en_enable_f();
-
-	return val;
-}
-
-void gk20a_channel_setup_ramfc_for_privileged_channel(struct channel_gk20a *c)
-{
-	struct gk20a *g = c->g;
-	struct mem_desc *mem = &c->inst_block;
-
-	gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->hw_chid);
-
-	/* Enable HCE priv mode for phys mode transfer */
-	gk20a_mem_wr32(g, mem, ram_fc_hce_ctrl_w(),
-		pbdma_hce_ctrl_hce_priv_mode_yes_f());
-}
-
-int channel_gk20a_setup_ramfc(struct channel_gk20a *c,
-			u64 gpfifo_base, u32 gpfifo_entries, u32 flags)
-{
-	struct gk20a *g = c->g;
-	struct mem_desc *mem = &c->inst_block;
-
-	gk20a_dbg_fn("");
-
-	gk20a_memset(g, mem, 0, 0, ram_fc_size_val_v());
-
-	gk20a_mem_wr32(g, mem, ram_fc_gp_base_w(),
-		pbdma_gp_base_offset_f(
-		u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));
-
-	gk20a_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
-		pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
-		pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));
-
-	gk20a_mem_wr32(g, mem, ram_fc_signature_w(),
-		c->g->ops.fifo.get_pbdma_signature(c->g));
-
-	gk20a_mem_wr32(g, mem, ram_fc_formats_w(),
-		pbdma_formats_gp_fermi0_f() |
-		pbdma_formats_pb_fermi1_f() |
-		pbdma_formats_mp_fermi0_f());
-
-	gk20a_mem_wr32(g, mem, ram_fc_pb_header_w(),
-		pbdma_pb_header_priv_user_f() |
-		pbdma_pb_header_method_zero_f() |
-		pbdma_pb_header_subchannel_zero_f() |
-		pbdma_pb_header_level_main_f() |
-		pbdma_pb_header_first_true_f() |
-		pbdma_pb_header_type_inc_f());
-
-	gk20a_mem_wr32(g, mem, ram_fc_subdevice_w(),
-		pbdma_subdevice_id_f(1) |
-		pbdma_subdevice_status_active_f() |
-		pbdma_subdevice_channel_dma_enable_f());
-
-	gk20a_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f());
-
-	gk20a_mem_wr32(g, mem, ram_fc_acquire_w(),
-		channel_gk20a_pbdma_acquire_val(c));
-
-	gk20a_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
-		fifo_runlist_timeslice_timeout_128_f() |
-		fifo_runlist_timeslice_timescale_3_f() |
-		fifo_runlist_timeslice_enable_true_f());
-
-	gk20a_mem_wr32(g, mem, ram_fc_pb_timeslice_w(),
-		fifo_pb_timeslice_timeout_16_f() |
-		fifo_pb_timeslice_timescale_0_f() |
-		fifo_pb_timeslice_enable_true_f());
-
-	gk20a_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
-
-	if (c->is_privileged_channel)
-		gk20a_channel_setup_ramfc_for_privileged_channel(c);
-
-	return channel_gk20a_commit_userd(c);
-}
-
-static int channel_gk20a_setup_userd(struct channel_gk20a *c)
-{
-	struct gk20a *g = c->g;
-	struct mem_desc *mem = &g->fifo.userd;
-	u32 offset = c->hw_chid * g->fifo.userd_entry_size / sizeof(u32);
-
-	gk20a_dbg_fn("");
-
-	gk20a_mem_wr32(g, mem, offset + ram_userd_put_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_get_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_ref_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_put_hi_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_ref_threshold_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_gp_top_level_get_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_gp_top_level_get_hi_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_get_hi_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_gp_get_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_gp_put_w(), 0);
-
-	return 0;
-}
-
-static void channel_gk20a_bind(struct channel_gk20a *c)
-{
-	struct gk20a *g = c->g;
-	u32 inst_ptr = gk20a_mm_inst_block_addr(g, &c->inst_block)
-		>> ram_in_base_shift_v();
-
-	gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
-		c->hw_chid, inst_ptr);
-
-
-	gk20a_writel(g, ccsr_channel_r(c->hw_chid),
-		(gk20a_readl(g, ccsr_channel_r(c->hw_chid)) &
-		 ~ccsr_channel_runlist_f(~0)) |
-		 ccsr_channel_runlist_f(c->runlist_id));
-
-	gk20a_writel(g, ccsr_channel_inst_r(c->hw_chid),
-		ccsr_channel_inst_ptr_f(inst_ptr) |
-		gk20a_aperture_mask(g, &c->inst_block,
-		 ccsr_channel_inst_target_sys_mem_ncoh_f(),
-		 ccsr_channel_inst_target_vid_mem_f()) |
-		ccsr_channel_inst_bind_true_f());
-
-	gk20a_writel(g, ccsr_channel_r(c->hw_chid),
-		(gk20a_readl(g, ccsr_channel_r(c->hw_chid)) &
-		 ~ccsr_channel_enable_set_f(~0)) |
-		 ccsr_channel_enable_set_true_f());
-
-	wmb();
-	atomic_set(&c->bound, true);
-
-}
-
-void channel_gk20a_unbind(struct channel_gk20a *ch_gk20a)
-{
-	struct gk20a *g = ch_gk20a->g;
-
-	gk20a_dbg_fn("");
-
-	if (atomic_cmpxchg(&ch_gk20a->bound, true, false)) {
-		gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->hw_chid),
-			ccsr_channel_inst_ptr_f(0) |
-			ccsr_channel_inst_bind_false_f());
-	}
-}
-
-int channel_gk20a_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
-{
-	int err;
-
-	gk20a_dbg_fn("");
-
-	err = gk20a_alloc_inst_block(g, &ch->inst_block);
-	if (err)
-		return err;
-
-	gk20a_dbg_info("channel %d inst block physical addr: 0x%16llx",
-		ch->hw_chid, gk20a_mm_inst_block_addr(g, &ch->inst_block));
-
-	gk20a_dbg_fn("done");
-	return 0;
-}
-
-void channel_gk20a_free_inst(struct gk20a *g, struct channel_gk20a *ch)
-{
-	gk20a_free_inst_block(g, &ch->inst_block);
-}
-
 static int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add)
 {
 	return c->g->ops.fifo.update_runlist(c->g, c->runlist_id, c->hw_chid, add, true);
 }
 
-void channel_gk20a_enable(struct channel_gk20a *ch)
-{
-	/* enable channel */
-	gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
-		gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid)) |
-		ccsr_channel_enable_set_true_f());
-}
-
-void channel_gk20a_disable(struct channel_gk20a *ch)
-{
-	/* disable channel */
-	gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
-		gk20a_readl(ch->g,
-			ccsr_channel_r(ch->hw_chid)) |
-		ccsr_channel_enable_clr_true_f());
-}
-
 int gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
 {
 	struct tsg_gk20a *tsg;
@@ -991,8 +710,6 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 
 	gk20a_gr_flush_channel_tlb(gr);
 
-	memset(&ch->ramfc, 0, sizeof(struct mem_desc_sub));
-
 	gk20a_gmmu_unmap_free(ch_vm, &ch->gpfifo.mem);
 	nvgpu_big_free(g, ch->gpfifo.pipe);
 	memset(&ch->gpfifo, 0, sizeof(struct gpfifo_desc));
@@ -1834,6 +1551,7 @@ int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
 	struct vm_gk20a *ch_vm;
 	u32 gpfifo_size;
 	int err = 0;
+	unsigned long acquire_timeout;
 
 	gpfifo_size = args->num_entries;
 
@@ -1852,9 +1570,6 @@ int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
 	}
 	ch_vm = c->vm;
 
-	c->ramfc.offset = 0;
-	c->ramfc.size = ram_in_ramfc_s() / 8;
-
 	if (c->gpfifo.mem.size) {
 		gk20a_err(d, "channel %d :"
 			"gpfifo already allocated", c->hw_chid);
@@ -1884,7 +1599,7 @@ int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
 	gk20a_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d",
 		c->hw_chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num);
 
-	channel_gk20a_setup_userd(c);
+	g->ops.fifo.setup_userd(c);
 
 	if (!platform->aggressive_sync_destroy_thresh) {
 		nvgpu_mutex_acquire(&c->sync_lock);
@@ -1903,8 +1618,14 @@ int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
 		}
 	}
 
+	if (!c->g->timeouts_enabled || !c->wdt_enabled)
+		acquire_timeout = 0;
+	else
+		acquire_timeout = gk20a_get_channel_watchdog_timeout(c);
+
 	err = g->ops.fifo.setup_ramfc(c, c->gpfifo.mem.gpu_va,
-			c->gpfifo.entry_num, args->flags);
+			c->gpfifo.entry_num,
+			acquire_timeout, args->flags);
 	if (err)
 		goto clean_up_sync;
 
@@ -1949,19 +1670,6 @@ clean_up:
 	return err;
 }
 
-u32 gk20a_userd_gp_get(struct gk20a *g, struct channel_gk20a *c)
-{
-	return gk20a_bar1_readl(g,
-		c->userd_gpu_va + sizeof(u32) * ram_userd_gp_get_w());
-}
-
-void gk20a_userd_gp_put(struct gk20a *g, struct channel_gk20a *c)
-{
-	gk20a_bar1_writel(g,
-		c->userd_gpu_va + sizeof(u32) * ram_userd_gp_put_w(),
-		c->gpfifo.put);
-}
-
 /* Update with this periodically to determine how the gpfifo is draining. */
 static inline u32 update_gp_get(struct gk20a *g,
 		struct channel_gk20a *c)
@@ -2093,7 +1801,7 @@ static void trace_write_pushbuffer_range(struct channel_gk20a *c,
 
 static void __gk20a_channel_timeout_start(struct channel_gk20a *ch)
 {
-	ch->timeout.gp_get = gk20a_userd_gp_get(ch->g, ch);
+	ch->timeout.gp_get = ch->g->ops.fifo.userd_gp_get(ch->g, ch);
 	ch->timeout.running = true;
 	nvgpu_timeout_init(ch->g, &ch->timeout.timer,
 		gk20a_get_channel_watchdog_timeout(ch),
@@ -2225,7 +1933,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
 	ch->timeout.running = false;
 	nvgpu_raw_spinlock_release(&ch->timeout.lock);
 
-	if (gk20a_userd_gp_get(ch->g, ch) != gp_get) {
+	if (g->ops.fifo.userd_gp_get(ch->g, ch) != gp_get) {
 		/* Channel has advanced, reschedule */
 		gk20a_channel_timeout_start(ch);
 		return;
@@ -3693,55 +3401,6 @@ static int gk20a_channel_event_id_ctrl(struct channel_gk20a *ch,
 	return err;
 }
 
-int gk20a_channel_set_priority(struct channel_gk20a *ch, u32 priority)
-{
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		gk20a_err(dev_from_gk20a(ch->g),
-			"invalid operation for TSG!\n");
-		return -EINVAL;
-	}
-
-	/* set priority of graphics channel */
-	switch (priority) {
-	case NVGPU_PRIORITY_LOW:
-		ch->timeslice_us = ch->g->timeslice_low_priority_us;
-		break;
-	case NVGPU_PRIORITY_MEDIUM:
-		ch->timeslice_us = ch->g->timeslice_medium_priority_us;
-		break;
-	case NVGPU_PRIORITY_HIGH:
-		ch->timeslice_us = ch->g->timeslice_high_priority_us;
-		break;
-	default:
-		pr_err("Unsupported priority");
-		return -EINVAL;
-	}
-
-	return channel_gk20a_set_schedule_params(ch);
-}
-
-int gk20a_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
-{
-	struct gk20a *g = ch->g;
-
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		gk20a_err(dev_from_gk20a(ch->g),
-			"invalid operation for TSG!\n");
-		return -EINVAL;
-	}
-
-	if (timeslice < g->min_timeslice_us ||
-		timeslice > g->max_timeslice_us)
-		return -EINVAL;
-
-	ch->timeslice_us = timeslice;
-
-	gk20a_dbg(gpu_dbg_sched, "chid=%u timeslice=%u us",
-		ch->hw_chid, timeslice);
-
-	return channel_gk20a_set_schedule_params(ch);
-}
-
 static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
 		struct nvgpu_zcull_bind_args *args)
 {
@@ -3924,21 +3583,6 @@ clean_up:
 	return ret;
 }
 
-void gk20a_init_channel(struct gpu_ops *gops)
-{
-	gops->fifo.bind_channel = channel_gk20a_bind;
-	gops->fifo.unbind_channel = channel_gk20a_unbind;
-	gops->fifo.disable_channel = channel_gk20a_disable;
-	gops->fifo.enable_channel = channel_gk20a_enable;
-	gops->fifo.alloc_inst = channel_gk20a_alloc_inst;
-	gops->fifo.free_inst = channel_gk20a_free_inst;
-	gops->fifo.setup_ramfc = channel_gk20a_setup_ramfc;
-	gops->fifo.channel_set_priority = gk20a_channel_set_priority;
-	gops->fifo.channel_set_timeslice = gk20a_channel_set_timeslice;
-	gops->fifo.userd_gp_get = gk20a_userd_gp_get;
-	gops->fifo.userd_gp_put = gk20a_userd_gp_put;
-}
-
 long gk20a_channel_ioctl(struct file *filp,
 		unsigned int cmd, unsigned long arg)
 {