summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/fifo_vgpu.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/fifo_vgpu.c  63
1 file changed, 52 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index e776e97c..9e40218d 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Virtualized GPU Fifo 2 * Virtualized GPU Fifo
3 * 3 *
4 * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -81,6 +81,7 @@ static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
81 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX; 81 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
82 msg.handle = platform->virt_handle; 82 msg.handle = platform->virt_handle;
83 p->id = ch->hw_chid; 83 p->id = ch->hw_chid;
84 p->pid = (u64)current->pid;
84 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); 85 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
85 if (err || msg.ret) { 86 if (err || msg.ret) {
86 gk20a_err(dev_from_gk20a(g), "fail"); 87 gk20a_err(dev_from_gk20a(g), "fail");
@@ -194,12 +195,6 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
194 if (!runlist->active_channels) 195 if (!runlist->active_channels)
195 goto clean_up_runlist_info; 196 goto clean_up_runlist_info;
196 197
197 runlist->high_prio_channels =
198 kzalloc(DIV_ROUND_UP(f->num_channels, BITS_PER_BYTE),
199 GFP_KERNEL);
200 if (!runlist->high_prio_channels)
201 goto clean_up_runlist_info;
202
203 runlist_size = sizeof(u16) * f->num_channels; 198 runlist_size = sizeof(u16) * f->num_channels;
204 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) { 199 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
205 int err = gk20a_gmmu_alloc(g, runlist_size, &runlist->mem[i]); 200 int err = gk20a_gmmu_alloc(g, runlist_size, &runlist->mem[i]);
@@ -222,9 +217,6 @@ clean_up_runlist:
222 gk20a_gmmu_free(g, &runlist->mem[i]); 217 gk20a_gmmu_free(g, &runlist->mem[i]);
223 218
224clean_up_runlist_info: 219clean_up_runlist_info:
225 kfree(runlist->high_prio_channels);
226 runlist->high_prio_channels = NULL;
227
228 kfree(runlist->active_channels); 220 kfree(runlist->active_channels);
229 runlist->active_channels = NULL; 221 runlist->active_channels = NULL;
230 222
@@ -550,6 +542,54 @@ static int vgpu_channel_set_priority(struct channel_gk20a *ch, u32 priority)
550 return err ? err : msg.ret; 542 return err ? err : msg.ret;
551} 543}
552 544
545static int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
546 u32 id,
547 bool is_tsg,
548 u32 runlist_id,
549 u32 new_level)
550{
551 struct gk20a_platform *platform = gk20a_get_platform(g->dev);
552 struct tegra_vgpu_cmd_msg msg;
553 struct tegra_vgpu_channel_runlist_interleave_params *p =
554 &msg.params.channel_interleave;
555 struct channel_gk20a *ch;
556 int err;
557
558 gk20a_dbg_fn("");
559
560 /* FIXME: add support for TSGs */
561 if (is_tsg)
562 return -ENOSYS;
563
564 ch = &g->fifo.channel[id];
565 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_RUNLIST_INTERLEAVE;
566 msg.handle = platform->virt_handle;
567 p->handle = ch->virt_ctx;
568 p->level = new_level;
569 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
570 WARN_ON(err || msg.ret);
571 return err ? err : msg.ret;
572}
573
574int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
575{
576 struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
577 struct tegra_vgpu_cmd_msg msg;
578 struct tegra_vgpu_channel_timeslice_params *p =
579 &msg.params.channel_timeslice;
580 int err;
581
582 gk20a_dbg_fn("");
583
584 msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_TIMESLICE;
585 msg.handle = platform->virt_handle;
586 p->handle = ch->virt_ctx;
587 p->timeslice_us = timeslice;
588 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
589 WARN_ON(err || msg.ret);
590 return err ? err : msg.ret;
591}
592
553static void vgpu_fifo_set_ctx_mmu_error(struct gk20a *g, 593static void vgpu_fifo_set_ctx_mmu_error(struct gk20a *g,
554 struct channel_gk20a *ch) 594 struct channel_gk20a *ch)
555{ 595{
@@ -635,5 +675,6 @@ void vgpu_init_fifo_ops(struct gpu_ops *gops)
635 gops->fifo.update_runlist = vgpu_fifo_update_runlist; 675 gops->fifo.update_runlist = vgpu_fifo_update_runlist;
636 gops->fifo.wait_engine_idle = vgpu_fifo_wait_engine_idle; 676 gops->fifo.wait_engine_idle = vgpu_fifo_wait_engine_idle;
637 gops->fifo.channel_set_priority = vgpu_channel_set_priority; 677 gops->fifo.channel_set_priority = vgpu_channel_set_priority;
678 gops->fifo.set_runlist_interleave = vgpu_fifo_set_runlist_interleave;
679 gops->fifo.channel_set_timeslice = vgpu_channel_set_timeslice;
638} 680}
639