path: root/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
author	Richard Zhao <rizhao@nvidia.com>	2018-01-30 02:24:37 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-02-27 17:30:52 -0500
commit	6393eddfa996fba03464f897b85aa5ec79860fed (patch)
tree	557ebe9be93e2b0464118e7d8ec019d9d5dbae5f /drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
parent	7932568b7fe9e16b2b83bc58b2b3686c0d5e52d4 (diff)
gpu: nvgpu: vgpu: move common files out of linux folder
Most files have been moved out of the linux folder. More code can be made common as HAL-ification continues.

Jira EVLR-2364

Change-Id: Ia9dbdbc82f45ceefe5c788eac7517000cd455d5e
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1649947
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c')
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c	206
1 file changed, 206 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
new file mode 100644
index 00000000..cf9a28c7
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -0,0 +1,206 @@
+/*
+ * Virtualized GPU Memory Management
+ *
+ * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "vgpu_mm_gp10b.h"
+#include "gk20a/mm_gk20a.h"
+
+#include <nvgpu/bug.h>
+#include <nvgpu/dma.h>
+#include <nvgpu/vgpu/vgpu_ivc.h>
+#include <nvgpu/vgpu/vgpu.h>
+
+int vgpu_gp10b_init_mm_setup_hw(struct gk20a *g)
+{
+	g->mm.disable_bigpage = true;
+	return 0;
+}
+
+static inline int add_mem_desc(struct tegra_vgpu_mem_desc *mem_desc,
+				u64 addr, u64 size, size_t *oob_size)
+{
+	if (*oob_size < sizeof(*mem_desc))
+		return -ENOMEM;
+
+	mem_desc->addr = addr;
+	mem_desc->length = size;
+	*oob_size -= sizeof(*mem_desc);
+	return 0;
+}
+
+u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
+				u64 map_offset,
+				struct nvgpu_sgt *sgt,
+				u64 buffer_offset,
+				u64 size,
+				int pgsz_idx,
+				u8 kind_v,
+				u32 ctag_offset,
+				u32 flags,
+				int rw_flag,
+				bool clear_ctags,
+				bool sparse,
+				bool priv,
+				struct vm_gk20a_mapping_batch *batch,
+				enum nvgpu_aperture aperture)
+{
+	int err = 0;
+	struct gk20a *g = gk20a_from_vm(vm);
+	struct tegra_vgpu_cmd_msg msg;
+	struct tegra_vgpu_as_map_ex_params *p = &msg.params.as_map_ex;
+	struct tegra_vgpu_mem_desc *mem_desc;
+	u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
+	u64 buffer_size = PAGE_ALIGN(size);
+	u64 space_to_skip = buffer_offset;
+	u32 mem_desc_count = 0, i;
+	void *handle = NULL;
+	size_t oob_size;
+	u8 prot;
+	void *sgl;
+
+	gk20a_dbg_fn("");
+
+	/* FIXME: add support for sparse mappings */
+
+	if (WARN_ON(!sgt) || WARN_ON(nvgpu_iommuable(g)))
+		return 0;
+
+	if (space_to_skip & (page_size - 1))
+		return 0;
+
+	memset(&msg, 0, sizeof(msg));
+
+	/* Allocate (or validate when map_offset != 0) the virtual address. */
+	if (!map_offset) {
+		map_offset = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
+		if (!map_offset) {
+			nvgpu_err(g, "failed to allocate va space");
+			err = -ENOMEM;
+			goto fail;
+		}
+	}
+
+	handle = vgpu_ivc_oob_get_ptr(vgpu_ivc_get_server_vmid(),
+					TEGRA_VGPU_QUEUE_CMD,
+					(void **)&mem_desc, &oob_size);
+	if (!handle) {
+		err = -EINVAL;
+		goto fail;
+	}
+	sgl = sgt->sgl;
+	while (sgl) {
+		u64 phys_addr;
+		u64 chunk_length;
+
+		/*
+		 * Cut out sgl ents for space_to_skip.
+		 */
+		if (space_to_skip &&
+		    space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) {
+			space_to_skip -= nvgpu_sgt_get_length(sgt, sgl);
+			sgl = nvgpu_sgt_get_next(sgt, sgl);
+			continue;
+		}
+
+		phys_addr = nvgpu_sgt_get_phys(sgt, sgl) + space_to_skip;
+		chunk_length = min(size,
+				   nvgpu_sgt_get_length(sgt, sgl) - space_to_skip);
+
+		if (add_mem_desc(&mem_desc[mem_desc_count++], phys_addr,
+				 chunk_length, &oob_size)) {
+			err = -ENOMEM;
+			goto fail;
+		}
+
+		space_to_skip = 0;
+		size -= chunk_length;
+		sgl = nvgpu_sgt_get_next(sgt, sgl);
+
+		if (size == 0)
+			break;
+	}
+
+	if (rw_flag == gk20a_mem_flag_read_only)
+		prot = TEGRA_VGPU_MAP_PROT_READ_ONLY;
+	else if (rw_flag == gk20a_mem_flag_write_only)
+		prot = TEGRA_VGPU_MAP_PROT_WRITE_ONLY;
+	else
+		prot = TEGRA_VGPU_MAP_PROT_NONE;
+
+	if (pgsz_idx == gmmu_page_size_kernel) {
+		if (page_size == vm->gmmu_page_sizes[gmmu_page_size_small]) {
+			pgsz_idx = gmmu_page_size_small;
+		} else if (page_size ==
+				vm->gmmu_page_sizes[gmmu_page_size_big]) {
+			pgsz_idx = gmmu_page_size_big;
+		} else {
+			nvgpu_err(g, "invalid kernel page size %d",
+				  page_size);
+			goto fail;
+		}
+	}
+
+	msg.cmd = TEGRA_VGPU_CMD_AS_MAP_EX;
+	msg.handle = vgpu_get_handle(g);
+	p->handle = vm->handle;
+	p->gpu_va = map_offset;
+	p->size = buffer_size;
+	p->mem_desc_count = mem_desc_count;
+	p->pgsz_idx = pgsz_idx;
+	p->iova = 0;
+	p->kind = kind_v;
+	if (flags & NVGPU_VM_MAP_CACHEABLE)
+		p->flags = TEGRA_VGPU_MAP_CACHEABLE;
+	if (flags & NVGPU_VM_MAP_IO_COHERENT)
+		p->flags |= TEGRA_VGPU_MAP_IO_COHERENT;
+	if (flags & NVGPU_VM_MAP_L3_ALLOC)
+		p->flags |= TEGRA_VGPU_MAP_L3_ALLOC;
+	p->prot = prot;
+	p->ctag_offset = ctag_offset;
+	p->clear_ctags = clear_ctags;
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	if (err || msg.ret)
+		goto fail;
+
+	/* TLB invalidate handled on server side */
+
+	vgpu_ivc_oob_put_ptr(handle);
+	return map_offset;
+fail:
+	if (handle)
+		vgpu_ivc_oob_put_ptr(handle);
+	nvgpu_err(g, "Failed: err=%d, msg.ret=%d", err, msg.ret);
+	nvgpu_err(g,
+		  " Map: %-5s GPU virt %#-12llx +%#-9llx "
+		  "phys offset: %#-4llx; pgsz: %3dkb perm=%-2s | "
+		  "kind=%#02x APT=%-6s",
+		  vm->name, map_offset, buffer_size, buffer_offset,
+		  vm->gmmu_page_sizes[pgsz_idx] >> 10,
+		  nvgpu_gmmu_perm_str(rw_flag),
+		  kind_v, "SYSMEM");
+	for (i = 0; i < mem_desc_count; i++)
+		nvgpu_err(g, "  > 0x%010llx + 0x%llx",
+			  mem_desc[i].addr, mem_desc[i].length);
+
+	return 0;
+}
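
Reviewer note: the core of the new map path is the loop that first skips buffer_offset worth of scatter-gather entries, then packs (addr, length) pairs into the fixed-size out-of-band buffer shared with the vgpu server, refusing to overrun it via the oob_size guard in add_mem_desc(). The standalone sketch below illustrates only that packing pattern in plain userspace C; the struct and helper names loosely mirror the patch, but the array-based scatter list, sizes, and main() driver are made-up stand-ins, not the nvgpu_sgt or tegra_vgpu APIs.

/* Illustrative sketch of the descriptor-packing loop (not driver code). */
#include <stdint.h>
#include <stdio.h>

struct mem_desc {          /* stand-in for struct tegra_vgpu_mem_desc */
	uint64_t addr;
	uint64_t length;
};

struct chunk {             /* stand-in for one scatter-gather entry */
	uint64_t phys;
	uint64_t len;
};

/* Same guard as add_mem_desc() in the patch: never overrun the OOB area. */
static int add_desc(struct mem_desc *d, uint64_t addr, uint64_t len,
		    size_t *oob_size)
{
	if (*oob_size < sizeof(*d))
		return -1;
	d->addr = addr;
	d->length = len;
	*oob_size -= sizeof(*d);
	return 0;
}

int main(void)
{
	struct chunk sg[] = { { 0x1000, 0x2000 }, { 0x8000, 0x1000 } };
	struct mem_desc oob[4];
	size_t oob_size = sizeof(oob);
	uint64_t space_to_skip = 0x1000;   /* plays the role of buffer_offset */
	uint64_t size = 0x1800;            /* bytes left to map */
	unsigned int count = 0;

	for (unsigned int i = 0; i < 2 && size; i++) {
		if (space_to_skip >= sg[i].len) {      /* skip whole entry */
			space_to_skip -= sg[i].len;
			continue;
		}
		uint64_t addr = sg[i].phys + space_to_skip;
		uint64_t len = sg[i].len - space_to_skip;
		if (len > size)
			len = size;
		if (add_desc(&oob[count++], addr, len, &oob_size))
			return 1;                      /* OOB area exhausted */
		space_to_skip = 0;
		size -= len;
	}

	for (unsigned int i = 0; i < count; i++)
		printf("desc %u: 0x%llx + 0x%llx\n", i,
		       (unsigned long long)oob[i].addr,
		       (unsigned long long)oob[i].length);
	return 0;
}

With the sample values above the sketch emits two descriptors (0x2000 + 0x1000 and 0x8000 + 0x800), mirroring how the patch trims the first entry by buffer_offset and clamps the last one to the remaining map size before handing the list to the server over TEGRA_VGPU_CMD_AS_MAP_EX.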