Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c | 203 ----------------
1 file changed, 0 insertions(+), 203 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
deleted file mode 100644
index 5b48cca8..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ /dev/null
@@ -1,203 +0,0 @@
/*
 * Virtualized GPU Memory Management
 *
 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <uapi/linux/nvgpu.h>

#include "vgpu/vgpu.h"
#include "vgpu_mm_gp10b.h"
#include "gk20a/mm_gk20a.h"

#include <nvgpu/bug.h>

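/*
 * On vGPU the guest never programs the GMMU/SMMU itself; the server side
 * owns the real hardware setup. Guest-side MM "hardware init" therefore
 * reduces to recording that the SMMU is bypassed and big pages are off.
 */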
int vgpu_gp10b_init_mm_setup_hw(struct gk20a *g)
{
        g->mm.bypass_smmu = true;
        g->mm.disable_bigpage = true;
        return 0;
}

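/*
 * Append one (addr, length) chunk to the descriptor array that is built
 * in place in the shared out-of-band (OOB) buffer. *oob_size tracks the
 * space remaining in that buffer, so exhausting it yields -ENOMEM.
 */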
static inline int add_mem_desc(struct tegra_vgpu_mem_desc *mem_desc,
                               u64 addr, u64 size, size_t *oob_size)
{
        if (*oob_size < sizeof(*mem_desc))
                return -ENOMEM;

        mem_desc->addr = addr;
        mem_desc->length = size;
        *oob_size -= sizeof(*mem_desc);
        return 0;
}

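/*
 * Guest-side GMMU map. Rather than writing page tables directly, this
 * walks the scatter-gather table, packs physical (addr, length) chunks
 * into the OOB buffer, and sends one TEGRA_VGPU_CMD_AS_MAP_EX message so
 * the server performs the actual mapping. Returns the GPU VA on success
 * and 0 on failure, since 0 is never a valid map offset.
 */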
u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
                               u64 map_offset,
                               struct nvgpu_sgt *sgt,
                               u64 buffer_offset,
                               u64 size,
                               int pgsz_idx,
                               u8 kind_v,
                               u32 ctag_offset,
                               u32 flags,
                               int rw_flag,
                               bool clear_ctags,
                               bool sparse,
                               bool priv,
                               struct vm_gk20a_mapping_batch *batch,
                               enum nvgpu_aperture aperture)
{
        int err = 0;
        struct gk20a *g = gk20a_from_vm(vm);
        struct tegra_vgpu_cmd_msg msg;
        struct tegra_vgpu_as_map_ex_params *p = &msg.params.as_map_ex;
        struct tegra_vgpu_mem_desc *mem_desc;
        u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
        u64 buffer_size = PAGE_ALIGN(size);
        u64 space_to_skip = buffer_offset;
        u32 mem_desc_count = 0, i;
        void *handle = NULL;
        size_t oob_size;
        u8 prot;
        void *sgl;

        gk20a_dbg_fn("");

        /* FIXME: add support for sparse mappings */

        if (WARN_ON(!sgt) || WARN_ON(!g->mm.bypass_smmu))
                return 0;

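        /* The offset into the buffer must be aligned to the chosen GMMU
         * page size; a misaligned skip cannot be expressed as page-sized
         * chunks. */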
        if (space_to_skip & (page_size - 1))
                return 0;

        memset(&msg, 0, sizeof(msg));

        /* Allocate (or validate when map_offset != 0) the virtual address. */
        if (!map_offset) {
                map_offset = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
                if (!map_offset) {
                        nvgpu_err(g, "failed to allocate va space");
                        err = -ENOMEM;
                        goto fail;
                }
        }

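        /*
         * Grab the out-of-band buffer shared with the vGPU server; the
         * mem_desc array is assembled directly in that buffer, so no
         * separate allocation or copy of the descriptor list is needed.
         */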
        handle = tegra_gr_comm_oob_get_ptr(TEGRA_GR_COMM_CTX_CLIENT,
                                           tegra_gr_comm_get_server_vmid(),
                                           TEGRA_VGPU_QUEUE_CMD,
                                           (void **)&mem_desc, &oob_size);
        if (!handle) {
                err = -EINVAL;
                goto fail;
        }
        sgl = sgt->sgl;
        while (sgl) {
                u64 phys_addr;
                u64 chunk_length;

                /*
                 * Skip SG entries that fall entirely within space_to_skip;
                 * whatever remains becomes the offset into the first entry
                 * that is actually mapped.
                 */
                if (space_to_skip &&
                    space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) {
                        space_to_skip -= nvgpu_sgt_get_length(sgt, sgl);
                        sgl = nvgpu_sgt_get_next(sgt, sgl);
                        continue;
                }

                phys_addr = nvgpu_sgt_get_phys(sgt, sgl) + space_to_skip;
                chunk_length = min(size,
                                   nvgpu_sgt_get_length(sgt, sgl) -
                                   space_to_skip);

                if (add_mem_desc(&mem_desc[mem_desc_count++], phys_addr,
                                 chunk_length, &oob_size)) {
                        err = -ENOMEM;
                        goto fail;
                }

                space_to_skip = 0;
                size -= chunk_length;
                sgl = nvgpu_sgt_get_next(sgt, sgl);

                if (size == 0)
                        break;
        }

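        /* Translate the nvgpu rw_flag into the vgpu protocol protection
         * bits; gk20a_mem_flag_none (full read/write access) maps to
         * TEGRA_VGPU_MAP_PROT_NONE, i.e. no restriction is requested. */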
        if (rw_flag == gk20a_mem_flag_read_only)
                prot = TEGRA_VGPU_MAP_PROT_READ_ONLY;
        else if (rw_flag == gk20a_mem_flag_write_only)
                prot = TEGRA_VGPU_MAP_PROT_WRITE_ONLY;
        else
                prot = TEGRA_VGPU_MAP_PROT_NONE;

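        /*
         * The server only understands the real small/big page size indices,
         * so resolve the kernel page size alias to whichever of those two
         * matches its actual page size.
         */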
        if (pgsz_idx == gmmu_page_size_kernel) {
                if (page_size == vm->gmmu_page_sizes[gmmu_page_size_small]) {
                        pgsz_idx = gmmu_page_size_small;
                } else if (page_size ==
                                vm->gmmu_page_sizes[gmmu_page_size_big]) {
                        pgsz_idx = gmmu_page_size_big;
                } else {
                        nvgpu_err(g, "invalid kernel page size %u",
                                  page_size);
                        /* Set err so the failure log below does not
                         * report a misleading err=0. */
                        err = -EINVAL;
                        goto fail;
                }
        }

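        /* Everything the server needs that is not already sitting in the
         * OOB buffer travels in the command message itself. */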
        msg.cmd = TEGRA_VGPU_CMD_AS_MAP_EX;
        msg.handle = vgpu_get_handle(g);
        p->handle = vm->handle;
        p->gpu_va = map_offset;
        p->size = buffer_size;
        p->mem_desc_count = mem_desc_count;
        p->pgsz_idx = pgsz_idx;
        p->iova = 0;
        p->kind = kind_v;
        p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0;
        p->prot = prot;
        p->ctag_offset = ctag_offset;
        p->clear_ctags = clear_ctags;
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
        if (err || msg.ret)
                goto fail;

        /* TLB invalidate handled on server side */

        tegra_gr_comm_oob_put_ptr(handle);
        return map_offset;
fail:
        nvgpu_err(g, "Failed: err=%d, msg.ret=%d", err, msg.ret);
        nvgpu_err(g,
                  "  Map: %-5s GPU virt %#-12llx +%#-9llx "
                  "phys offset: %#-4llx; pgsz: %3dkb perm=%-2s | "
                  "kind=%#02x APT=%-6s",
                  vm->name, map_offset, buffer_size, buffer_offset,
                  vm->gmmu_page_sizes[pgsz_idx] >> 10,
                  nvgpu_gmmu_perm_str(rw_flag),
                  kind_v, "SYSMEM");
        for (i = 0; i < mem_desc_count; i++)
                nvgpu_err(g, "  > 0x%010llx + 0x%llx",
                          mem_desc[i].addr, mem_desc[i].length);
        /* Release the OOB buffer only after the descriptor dump above,
         * since mem_desc points into it. */
        if (handle)
                tegra_gr_comm_oob_put_ptr(handle);

        return 0;
}