summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c
diff options
context:
space:
mode:
authorRichard Zhao <rizhao@nvidia.com>2018-01-30 02:24:37 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2018-02-27 17:30:52 -0500
commit6393eddfa996fba03464f897b85aa5ec79860fed (patch)
tree557ebe9be93e2b0464118e7d8ec019d9d5dbae5f /drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c
parent7932568b7fe9e16b2b83bc58b2b3686c0d5e52d4 (diff)
gpu: nvgpu: vgpu: move common files out of linux folder
Most of files have been moved out of linux folder. More code could be common as halifying going on. Jira EVLR-2364 Change-Id: Ia9dbdbc82f45ceefe5c788eac7517000cd455d5e Signed-off-by: Richard Zhao <rizhao@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1649947 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c')
-rw-r--r--drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c200
1 file changed, 0 insertions(+), 200 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c
deleted file mode 100644
index 26ce891f..00000000
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c
+++ /dev/null
@@ -1,200 +0,0 @@
1/*
2 * Virtualized GPU Memory Management
3 *
4 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include "common/linux/vgpu/vgpu.h"
20#include "vgpu_mm_gp10b.h"
21#include "gk20a/mm_gk20a.h"
22
23#include <nvgpu/bug.h>
24#include <nvgpu/dma.h>
25#include <nvgpu/vgpu/vgpu_ivc.h>
26
27int vgpu_gp10b_init_mm_setup_hw(struct gk20a *g)
28{
29 g->mm.disable_bigpage = true;
30 return 0;
31}
32
33static inline int add_mem_desc(struct tegra_vgpu_mem_desc *mem_desc,
34 u64 addr, u64 size, size_t *oob_size)
35{
36 if (*oob_size < sizeof(*mem_desc))
37 return -ENOMEM;
38
39 mem_desc->addr = addr;
40 mem_desc->length = size;
41 *oob_size -= sizeof(*mem_desc);
42 return 0;
43}
44
45u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
46 u64 map_offset,
47 struct nvgpu_sgt *sgt,
48 u64 buffer_offset,
49 u64 size,
50 int pgsz_idx,
51 u8 kind_v,
52 u32 ctag_offset,
53 u32 flags,
54 int rw_flag,
55 bool clear_ctags,
56 bool sparse,
57 bool priv,
58 struct vm_gk20a_mapping_batch *batch,
59 enum nvgpu_aperture aperture)
60{
61 int err = 0;
62 struct gk20a *g = gk20a_from_vm(vm);
63 struct tegra_vgpu_cmd_msg msg;
64 struct tegra_vgpu_as_map_ex_params *p = &msg.params.as_map_ex;
65 struct tegra_vgpu_mem_desc *mem_desc;
66 u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
67 u64 buffer_size = PAGE_ALIGN(size);
68 u64 space_to_skip = buffer_offset;
69 u32 mem_desc_count = 0, i;
70 void *handle = NULL;
71 size_t oob_size;
72 u8 prot;
73 void *sgl;
74
75 gk20a_dbg_fn("");
76
77 /* FIXME: add support for sparse mappings */
78
79 if (WARN_ON(!sgt) || WARN_ON(nvgpu_iommuable(g)))
80 return 0;
81
82 if (space_to_skip & (page_size - 1))
83 return 0;
84
85 memset(&msg, 0, sizeof(msg));
86
87 /* Allocate (or validate when map_offset != 0) the virtual address. */
88 if (!map_offset) {
89 map_offset = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
90 if (!map_offset) {
91 nvgpu_err(g, "failed to allocate va space");
92 err = -ENOMEM;
93 goto fail;
94 }
95 }
96
97 handle = vgpu_ivc_oob_get_ptr(vgpu_ivc_get_server_vmid(),
98 TEGRA_VGPU_QUEUE_CMD,
99 (void **)&mem_desc, &oob_size);
100 if (!handle) {
101 err = -EINVAL;
102 goto fail;
103 }
104 sgl = sgt->sgl;
105 while (sgl) {
106 u64 phys_addr;
107 u64 chunk_length;
108
109 /*
110 * Cut out sgl ents for space_to_skip.
111 */
112 if (space_to_skip &&
113 space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) {
114 space_to_skip -= nvgpu_sgt_get_length(sgt, sgl);
115 sgl = nvgpu_sgt_get_next(sgt, sgl);
116 continue;
117 }
118
119 phys_addr = nvgpu_sgt_get_phys(sgt, sgl) + space_to_skip;
120 chunk_length = min(size,
121 nvgpu_sgt_get_length(sgt, sgl) - space_to_skip);
122
123 if (add_mem_desc(&mem_desc[mem_desc_count++], phys_addr,
124 chunk_length, &oob_size)) {
125 err = -ENOMEM;
126 goto fail;
127 }
128
129 space_to_skip = 0;
130 size -= chunk_length;
131 sgl = nvgpu_sgt_get_next(sgt, sgl);
132
133 if (size == 0)
134 break;
135 }
136
137 if (rw_flag == gk20a_mem_flag_read_only)
138 prot = TEGRA_VGPU_MAP_PROT_READ_ONLY;
139 else if (rw_flag == gk20a_mem_flag_write_only)
140 prot = TEGRA_VGPU_MAP_PROT_WRITE_ONLY;
141 else
142 prot = TEGRA_VGPU_MAP_PROT_NONE;
143
144 if (pgsz_idx == gmmu_page_size_kernel) {
145 if (page_size == vm->gmmu_page_sizes[gmmu_page_size_small]) {
146 pgsz_idx = gmmu_page_size_small;
147 } else if (page_size ==
148 vm->gmmu_page_sizes[gmmu_page_size_big]) {
149 pgsz_idx = gmmu_page_size_big;
150 } else {
151 nvgpu_err(g, "invalid kernel page size %d",
152 page_size);
153 goto fail;
154 }
155 }
156
157 msg.cmd = TEGRA_VGPU_CMD_AS_MAP_EX;
158 msg.handle = vgpu_get_handle(g);
159 p->handle = vm->handle;
160 p->gpu_va = map_offset;
161 p->size = buffer_size;
162 p->mem_desc_count = mem_desc_count;
163 p->pgsz_idx = pgsz_idx;
164 p->iova = 0;
165 p->kind = kind_v;
166 if (flags & NVGPU_VM_MAP_CACHEABLE)
167 p->flags = TEGRA_VGPU_MAP_CACHEABLE;
168 if (flags & NVGPU_VM_MAP_IO_COHERENT)
169 p->flags |= TEGRA_VGPU_MAP_IO_COHERENT;
170 if (flags & NVGPU_VM_MAP_L3_ALLOC)
171 p->flags |= TEGRA_VGPU_MAP_L3_ALLOC;
172 p->prot = prot;
173 p->ctag_offset = ctag_offset;
174 p->clear_ctags = clear_ctags;
175 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
176 if (err || msg.ret)
177 goto fail;
178
179 /* TLB invalidate handled on server side */
180
181 vgpu_ivc_oob_put_ptr(handle);
182 return map_offset;
183fail:
184 if (handle)
185 vgpu_ivc_oob_put_ptr(handle);
186 nvgpu_err(g, "Failed: err=%d, msg.ret=%d", err, msg.ret);
187 nvgpu_err(g,
188 " Map: %-5s GPU virt %#-12llx +%#-9llx "
189 "phys offset: %#-4llx; pgsz: %3dkb perm=%-2s | "
190 "kind=%#02x APT=%-6s",
191 vm->name, map_offset, buffer_size, buffer_offset,
192 vm->gmmu_page_sizes[pgsz_idx] >> 10,
193 nvgpu_gmmu_perm_str(rw_flag),
194 kind_v, "SYSMEM");
195 for (i = 0; i < mem_desc_count; i++)
196 nvgpu_err(g, " > 0x%010llx + 0x%llx",
197 mem_desc[i].addr, mem_desc[i].length);
198
199 return 0;
200}