Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c  | 145
1 file changed, 145 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
new file mode 100644
index 00000000..18d2de70
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
@@ -0,0 +1,145 @@
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <gk20a/gk20a.h>
#include <nvgpu/vgpu/vgpu.h>
#include <nvgpu/nvhost.h>
#include <nvgpu/vgpu/tegra_vgpu.h>

#include "gv11b/fifo_gv11b.h"

#ifdef CONFIG_TEGRA_GK20A_NVHOST

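/*
 * Map the complete sync point shim read-only into this VM's GPU virtual
 * address space. The mapping is created on first use and cached in
 * vm->syncpt_ro_map_gpu_va for later callers. The caller must hold
 * vm->syncpt_ro_map_lock.
 */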
static int set_syncpt_ro_map_gpu_va_locked(struct vm_gk20a *vm)
{
	int err;
	struct gk20a *g = gk20a_from_vm(vm);
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_map_syncpt_params *p = &msg.params.map_syncpt;

	if (vm->syncpt_ro_map_gpu_va)
		return 0;

	vm->syncpt_ro_map_gpu_va = __nvgpu_vm_alloc_va(vm,
			g->syncpt_unit_size,
			gmmu_page_size_kernel);
	if (!vm->syncpt_ro_map_gpu_va) {
		nvgpu_err(g, "allocating read-only va space failed");
		return -ENOMEM;
	}

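	/*
	 * Ask the vGPU server to back the reserved VA range with a
	 * read-only mapping of the whole sync point unit.
	 */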
	msg.cmd = TEGRA_VGPU_CMD_MAP_SYNCPT;
	msg.handle = vgpu_get_handle(g);
	p->as_handle = vm->handle;
	p->gpu_va = vm->syncpt_ro_map_gpu_va;
	p->len = g->syncpt_unit_size;
	p->offset = 0;
	p->prot = TEGRA_VGPU_MAP_PROT_READ_ONLY;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;
	if (err) {
		nvgpu_err(g,
			"mapping read-only va space failed err %d",
			err);
		__nvgpu_vm_free_va(vm, vm->syncpt_ro_map_gpu_va,
				gmmu_page_size_kernel);
		vm->syncpt_ro_map_gpu_va = 0;
		return err;
	}

	return 0;
}

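/*
 * Reserve GPU VA for one sync point and have the vGPU server map it for
 * this channel. Also ensures the VM-wide read-only shim mapping exists
 * (see set_syncpt_ro_map_gpu_va_locked() above).
 */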
int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
			u32 syncpt_id, struct nvgpu_mem *syncpt_buf)
{
	int err;
	struct gk20a *g = c->g;
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_map_syncpt_params *p = &msg.params.map_syncpt;

	/*
	 * Add a read-only map for the complete sync point shim range in
	 * the vm. All channels sharing the same vm share the same
	 * read-only mapping. Then create a read-write map for the current
	 * channel's sync point.
	 */
	nvgpu_mutex_acquire(&c->vm->syncpt_ro_map_lock);
	err = set_syncpt_ro_map_gpu_va_locked(c->vm);
	nvgpu_mutex_release(&c->vm->syncpt_ro_map_lock);
	if (err)
		return err;

	syncpt_buf->gpu_va = __nvgpu_vm_alloc_va(c->vm, g->syncpt_size,
			gmmu_page_size_kernel);
	if (!syncpt_buf->gpu_va) {
		nvgpu_err(g, "allocating syncpt va space failed");
		return -ENOMEM;
	}

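	/*
	 * Map this channel's sync point at its byte offset within the
	 * shim; TEGRA_VGPU_MAP_PROT_NONE places no read-only restriction
	 * on the mapping, giving the read-write map described above.
	 */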
	msg.cmd = TEGRA_VGPU_CMD_MAP_SYNCPT;
	msg.handle = vgpu_get_handle(g);
	p->as_handle = c->vm->handle;
	p->gpu_va = syncpt_buf->gpu_va;
	p->len = g->syncpt_size;
	p->offset =
		nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id);
	p->prot = TEGRA_VGPU_MAP_PROT_NONE;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;
	if (err) {
		nvgpu_err(g, "mapping syncpt va space failed err %d", err);
		__nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va,
				gmmu_page_size_kernel);
		return err;
	}

	return 0;
}

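/*
 * Return the base GPU VA and size of the VM-wide read-only sync point
 * mapping, creating the mapping on first use.
 */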
int vgpu_gv11b_fifo_get_sync_ro_map(struct vm_gk20a *vm,
	u64 *base_gpuva, u32 *sync_size)
{
	struct gk20a *g = gk20a_from_vm(vm);
	int err;

	nvgpu_mutex_acquire(&vm->syncpt_ro_map_lock);
	err = set_syncpt_ro_map_gpu_va_locked(vm);
	nvgpu_mutex_release(&vm->syncpt_ro_map_lock);
	if (err)
		return err;

	*base_gpuva = vm->syncpt_ro_map_gpu_va;
	*sync_size = g->syncpt_size;

	return 0;
}
#endif /* CONFIG_TEGRA_GK20A_NVHOST */

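/*
 * On vGPU the actual FIFO hardware programming is done by the server;
 * the client only picks up the max subcontext count published in the
 * vGPU constants.
 */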
int vgpu_gv11b_init_fifo_setup_hw(struct gk20a *g)
{
	struct fifo_gk20a *f = &g->fifo;
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	f->max_subctx_count = priv->constants.max_subctx_count;

	return 0;
}