path: root/include/os/linux/ioctl_as.c
Diffstat (limited to 'include/os/linux/ioctl_as.c')
-rw-r--r--  include/os/linux/ioctl_as.c  427
1 file changed, 0 insertions, 427 deletions
diff --git a/include/os/linux/ioctl_as.c b/include/os/linux/ioctl_as.c
deleted file mode 100644
index f0cec17..0000000
--- a/include/os/linux/ioctl_as.c
+++ /dev/null
@@ -1,427 +0,0 @@
/*
 * GK20A Address Spaces
 *
 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/fs.h>

#include <trace/events/gk20a.h>

#include <uapi/linux/nvgpu.h>

#include <nvgpu/gmmu.h>
#include <nvgpu/vm_area.h>
#include <nvgpu/log2.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/channel.h>

#include <nvgpu/linux/vm.h>

#include "platform_gk20a.h"
#include "ioctl_as.h"
#include "os_linux.h"

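/*
 * Translate the NVGPU_AS_ALLOC_SPACE_FLAGS_* values from the ioctl ABI into
 * the NVGPU_VM_AREA_ALLOC_* flags understood by the common VM area code.
 */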
static u32 gk20a_as_translate_as_alloc_space_flags(struct gk20a *g, u32 flags)
{
        u32 core_flags = 0;

        if (flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET)
                core_flags |= NVGPU_VM_AREA_ALLOC_FIXED_OFFSET;
        if (flags & NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE)
                core_flags |= NVGPU_VM_AREA_ALLOC_SPARSE;

        return core_flags;
}

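/*
 * Bind the channel identified by args->channel_fd to this address space.
 * Fails if the fd does not resolve to a channel or if the channel already
 * has a VM bound; the channel reference taken here is dropped on all paths.
 */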
static int gk20a_as_ioctl_bind_channel(
                struct gk20a_as_share *as_share,
                struct nvgpu_as_bind_channel_args *args)
{
        int err = 0;
        struct channel_gk20a *ch;
        struct gk20a *g = gk20a_from_vm(as_share->vm);

        nvgpu_log_fn(g, " ");

        ch = gk20a_get_channel_from_file(args->channel_fd);
        if (!ch)
                return -EINVAL;

        if (gk20a_channel_as_bound(ch)) {
                err = -EINVAL;
                goto out;
        }

        /* this will set channel_gk20a->vm */
        err = ch->g->ops.mm.vm_bind_channel(as_share->vm, ch);

out:
        gk20a_channel_put(ch);
        return err;
}

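/*
 * Reserve a VA area of args->pages pages of args->page_size bytes in this
 * address space, returning the resulting GPU VA in args->o_a.offset.
 */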
static int gk20a_as_ioctl_alloc_space(
                struct gk20a_as_share *as_share,
                struct nvgpu_as_alloc_space_args *args)
{
        struct gk20a *g = gk20a_from_vm(as_share->vm);

        nvgpu_log_fn(g, " ");
        return nvgpu_vm_area_alloc(as_share->vm, args->pages, args->page_size,
                        &args->o_a.offset,
                        gk20a_as_translate_as_alloc_space_flags(g,
                                args->flags));
}

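/* Release the VA area previously reserved at args->offset. */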
static int gk20a_as_ioctl_free_space(
                struct gk20a_as_share *as_share,
                struct nvgpu_as_free_space_args *args)
{
        struct gk20a *g = gk20a_from_vm(as_share->vm);

        nvgpu_log_fn(g, " ");
        return nvgpu_vm_area_free(as_share->vm, args->offset);
}

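/*
 * Map the dmabuf given by args->dmabuf_fd into this address space. Only
 * mappings that request direct kind control (compr_kind/incompr_kind) are
 * accepted; anything else is rejected with -EINVAL.
 */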
static int gk20a_as_ioctl_map_buffer_ex(
                struct gk20a_as_share *as_share,
                struct nvgpu_as_map_buffer_ex_args *args)
{
        struct gk20a *g = gk20a_from_vm(as_share->vm);

        nvgpu_log_fn(g, " ");

        /* unsupported, direct kind control must be used */
        if (!(args->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)) {
                struct gk20a *g = as_share->vm->mm->g;
                nvgpu_log_info(g, "Direct kind control must be requested");
                return -EINVAL;
        }

        return nvgpu_vm_map_buffer(as_share->vm, args->dmabuf_fd,
                        &args->offset, args->flags,
                        args->page_size,
                        args->compr_kind,
                        args->incompr_kind,
                        args->buffer_offset,
                        args->mapping_size,
                        NULL);
}

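/* Unmap the buffer previously mapped at args->offset; always returns 0. */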
static int gk20a_as_ioctl_unmap_buffer(
                struct gk20a_as_share *as_share,
                struct nvgpu_as_unmap_buffer_args *args)
{
        struct gk20a *g = gk20a_from_vm(as_share->vm);

        nvgpu_log_fn(g, " ");

        nvgpu_vm_unmap(as_share->vm, args->offset, NULL);

        return 0;
}

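/*
 * Perform a batch of unmaps followed by a batch of maps in one ioctl. Each
 * per-entry argument struct is copied in from user space individually, and
 * every operation shares a single vm_gk20a_mapping_batch that is finished
 * once at the end. On error, args->num_unmaps/num_maps are updated to
 * report how many entries were actually processed.
 */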
static int gk20a_as_ioctl_map_buffer_batch(
                struct gk20a_as_share *as_share,
                struct nvgpu_as_map_buffer_batch_args *args)
{
        struct gk20a *g = gk20a_from_vm(as_share->vm);
        u32 i;
        int err = 0;

        struct nvgpu_as_unmap_buffer_args __user *user_unmap_args =
                (struct nvgpu_as_unmap_buffer_args __user *)(uintptr_t)
                args->unmaps;
        struct nvgpu_as_map_buffer_ex_args __user *user_map_args =
                (struct nvgpu_as_map_buffer_ex_args __user *)(uintptr_t)
                args->maps;

        struct vm_gk20a_mapping_batch batch;

        nvgpu_log_fn(g, " ");

        if (args->num_unmaps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT ||
            args->num_maps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT)
                return -EINVAL;

        nvgpu_vm_mapping_batch_start(&batch);

        for (i = 0; i < args->num_unmaps; ++i) {
                struct nvgpu_as_unmap_buffer_args unmap_args;

                if (copy_from_user(&unmap_args, &user_unmap_args[i],
                                sizeof(unmap_args))) {
                        err = -EFAULT;
                        break;
                }

                nvgpu_vm_unmap(as_share->vm, unmap_args.offset, &batch);
        }

        nvgpu_speculation_barrier();
        if (err) {
                nvgpu_vm_mapping_batch_finish(as_share->vm, &batch);

                args->num_unmaps = i;
                args->num_maps = 0;
                return err;
        }

        for (i = 0; i < args->num_maps; ++i) {
                s16 compressible_kind;
                s16 incompressible_kind;

                struct nvgpu_as_map_buffer_ex_args map_args;
                memset(&map_args, 0, sizeof(map_args));

                if (copy_from_user(&map_args, &user_map_args[i],
                                sizeof(map_args))) {
                        err = -EFAULT;
                        break;
                }

                if (map_args.flags &
                    NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL) {
                        compressible_kind = map_args.compr_kind;
                        incompressible_kind = map_args.incompr_kind;
                } else {
                        /* direct kind control must be used */
                        err = -EINVAL;
                        break;
                }

                err = nvgpu_vm_map_buffer(
                        as_share->vm, map_args.dmabuf_fd,
                        &map_args.offset, map_args.flags, map_args.page_size,
                        compressible_kind, incompressible_kind,
                        map_args.buffer_offset,
                        map_args.mapping_size,
                        &batch);
                if (err)
                        break;
        }

        nvgpu_vm_mapping_batch_finish(as_share->vm, &batch);

        if (err)
                args->num_maps = i;
        /* note: args->num_unmaps will be unmodified, which is ok
         * since all unmaps are done */

        return err;
}

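/*
 * Report the VA regions (page size, base offset, number of pages) of this
 * address space's allocators to user space, writing at most as many entries
 * as fit in args->buf_size. The big-page region is omitted when the VM has
 * big pages disabled. On return, args->buf_size holds the size needed for
 * the full set of regions.
 */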
static int gk20a_as_ioctl_get_va_regions(
                struct gk20a_as_share *as_share,
                struct nvgpu_as_get_va_regions_args *args)
{
        unsigned int i;
        unsigned int write_entries;
        struct nvgpu_as_va_region __user *user_region_ptr;
        struct vm_gk20a *vm = as_share->vm;
        struct gk20a *g = gk20a_from_vm(vm);
        unsigned int page_sizes = GMMU_PAGE_SIZE_KERNEL;

        nvgpu_log_fn(g, " ");

        if (!vm->big_pages)
                page_sizes--;

        write_entries = args->buf_size / sizeof(struct nvgpu_as_va_region);
        if (write_entries > page_sizes)
                write_entries = page_sizes;

        user_region_ptr =
                (struct nvgpu_as_va_region __user *)(uintptr_t)args->buf_addr;

        for (i = 0; i < write_entries; ++i) {
                struct nvgpu_as_va_region region;
                struct nvgpu_allocator *vma = vm->vma[i];

                memset(&region, 0, sizeof(struct nvgpu_as_va_region));

                region.page_size = vm->gmmu_page_sizes[i];
                region.offset = nvgpu_alloc_base(vma);
                /* No __aeabi_uldivmod() on some platforms... */
                region.pages = (nvgpu_alloc_end(vma) -
                        nvgpu_alloc_base(vma)) >> ilog2(region.page_size);

                if (copy_to_user(user_region_ptr + i, &region, sizeof(region)))
                        return -EFAULT;
        }

        args->buf_size =
                page_sizes * sizeof(struct nvgpu_as_va_region);

        return 0;
}

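/*
 * Return the GPU VA and size of the read-only sync (syncpoint) mapping for
 * this VM. Only available when the kernel is built with
 * CONFIG_TEGRA_GK20A_NVHOST, the chip implements g->ops.fifo.get_sync_ro_map,
 * and syncpoints are in use; otherwise -EINVAL is returned.
 */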
static int nvgpu_as_ioctl_get_sync_ro_map(
                struct gk20a_as_share *as_share,
                struct nvgpu_as_get_sync_ro_map_args *args)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
        struct vm_gk20a *vm = as_share->vm;
        struct gk20a *g = gk20a_from_vm(vm);
        u64 base_gpuva;
        u32 sync_size;
        int err = 0;

        if (!g->ops.fifo.get_sync_ro_map)
                return -EINVAL;

        if (!nvgpu_has_syncpoints(g))
                return -EINVAL;

        err = g->ops.fifo.get_sync_ro_map(vm, &base_gpuva, &sync_size);
        if (err)
                return err;

        args->base_gpuva = base_gpuva;
        args->sync_size = sync_size;

        return err;
#else
        return -EINVAL;
#endif
}

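/*
 * open() handler for the address-space device node: allocate a new
 * gk20a_as_share and stash it in the file's private_data for later ioctls.
 */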
int gk20a_as_dev_open(struct inode *inode, struct file *filp)
{
        struct nvgpu_os_linux *l;
        struct gk20a_as_share *as_share;
        struct gk20a *g;
        int err;

        l = container_of(inode->i_cdev, struct nvgpu_os_linux, as_dev.cdev);
        g = &l->g;

        nvgpu_log_fn(g, " ");

        err = gk20a_as_alloc_share(g, 0, 0, &as_share);
        if (err) {
                nvgpu_log_fn(g, "failed to alloc share");
                return err;
        }

        filp->private_data = as_share;
        return 0;
}

int gk20a_as_dev_release(struct inode *inode, struct file *filp)
{
        struct gk20a_as_share *as_share = filp->private_data;

        if (!as_share)
                return 0;

        return gk20a_as_release_share(as_share);
}

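/*
 * Top-level ioctl dispatcher for the address-space device. Validates the
 * command number and argument size, copies the argument buffer in from user
 * space when the command direction includes _IOC_WRITE, takes a busy
 * reference on the GPU, dispatches to the handlers above, and copies the
 * buffer back out for _IOC_READ commands on success.
 */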
long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        int err = 0;
        struct gk20a_as_share *as_share = filp->private_data;
        struct gk20a *g = gk20a_from_as(as_share->as);

        u8 buf[NVGPU_AS_IOCTL_MAX_ARG_SIZE];

        nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));

        if ((_IOC_TYPE(cmd) != NVGPU_AS_IOCTL_MAGIC) ||
            (_IOC_NR(cmd) == 0) ||
            (_IOC_NR(cmd) > NVGPU_AS_IOCTL_LAST) ||
            (_IOC_SIZE(cmd) > NVGPU_AS_IOCTL_MAX_ARG_SIZE))
                return -EINVAL;

        memset(buf, 0, sizeof(buf));
        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        err = gk20a_busy(g);
        if (err)
                return err;

        nvgpu_speculation_barrier();
        switch (cmd) {
        case NVGPU_AS_IOCTL_BIND_CHANNEL:
                trace_gk20a_as_ioctl_bind_channel(g->name);
                err = gk20a_as_ioctl_bind_channel(as_share,
                                (struct nvgpu_as_bind_channel_args *)buf);

                break;
        case NVGPU32_AS_IOCTL_ALLOC_SPACE:
        {
                struct nvgpu32_as_alloc_space_args *args32 =
                        (struct nvgpu32_as_alloc_space_args *)buf;
                struct nvgpu_as_alloc_space_args args;

                args.pages = args32->pages;
                args.page_size = args32->page_size;
                args.flags = args32->flags;
                args.o_a.offset = args32->o_a.offset;
                trace_gk20a_as_ioctl_alloc_space(g->name);
                err = gk20a_as_ioctl_alloc_space(as_share, &args);
                args32->o_a.offset = args.o_a.offset;
                break;
        }
        case NVGPU_AS_IOCTL_ALLOC_SPACE:
                trace_gk20a_as_ioctl_alloc_space(g->name);
                err = gk20a_as_ioctl_alloc_space(as_share,
                                (struct nvgpu_as_alloc_space_args *)buf);
                break;
        case NVGPU_AS_IOCTL_FREE_SPACE:
                trace_gk20a_as_ioctl_free_space(g->name);
                err = gk20a_as_ioctl_free_space(as_share,
                                (struct nvgpu_as_free_space_args *)buf);
                break;
        case NVGPU_AS_IOCTL_MAP_BUFFER_EX:
                trace_gk20a_as_ioctl_map_buffer(g->name);
                err = gk20a_as_ioctl_map_buffer_ex(as_share,
                                (struct nvgpu_as_map_buffer_ex_args *)buf);
                break;
        case NVGPU_AS_IOCTL_UNMAP_BUFFER:
                trace_gk20a_as_ioctl_unmap_buffer(g->name);
                err = gk20a_as_ioctl_unmap_buffer(as_share,
                                (struct nvgpu_as_unmap_buffer_args *)buf);
                break;
        case NVGPU_AS_IOCTL_GET_VA_REGIONS:
                trace_gk20a_as_ioctl_get_va_regions(g->name);
                err = gk20a_as_ioctl_get_va_regions(as_share,
                                (struct nvgpu_as_get_va_regions_args *)buf);
                break;
        case NVGPU_AS_IOCTL_MAP_BUFFER_BATCH:
                err = gk20a_as_ioctl_map_buffer_batch(as_share,
                                (struct nvgpu_as_map_buffer_batch_args *)buf);
                break;
        case NVGPU_AS_IOCTL_GET_SYNC_RO_MAP:
                err = nvgpu_as_ioctl_get_sync_ro_map(as_share,
                                (struct nvgpu_as_get_sync_ro_map_args *)buf);
                break;
        default:
                err = -ENOTTY;
                break;
        }

        gk20a_idle(g);

        if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
                if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
                        err = -EFAULT;

        return err;
}