author	Terje Bergstrom <tbergstrom@nvidia.com>	2018-04-18 15:59:00 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-06-15 20:47:31 -0400
commit	2a2c16af5f9f1ccfc93a13e820d5381e5c881e92 (patch)
tree	2e5d7b042270a649978e5bb540857012c85fb5b5 /drivers/gpu/nvgpu/common/linux/ioctl_as.c
parent	98d996f4ffb0137d119b5849cae46d7b7e5693e1 (diff)
gpu: nvgpu: Move Linux files away from common
Move all Linux source code files to drivers/gpu/nvgpu/os/linux from
drivers/gpu/nvgpu/common/linux. This changes the meaning of common to
be OS independent.

JIRA NVGPU-598
JIRA NVGPU-601

Change-Id: Ib7f2a43d3688bb0d0b7dcc48469a6783fd988ce9
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1747714
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_as.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_as.c | 423
1 file changed, 0 insertions(+), 423 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
deleted file mode 100644
index 47f612cc..00000000
--- a/drivers/gpu/nvgpu/common/linux/ioctl_as.c
+++ /dev/null
@@ -1,423 +0,0 @@
/*
 * GK20A Address Spaces
 *
 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/fs.h>

#include <trace/events/gk20a.h>

#include <uapi/linux/nvgpu.h>

#include <nvgpu/gmmu.h>
#include <nvgpu/vm_area.h>
#include <nvgpu/log2.h>

#include <nvgpu/linux/vm.h>

#include "gk20a/gk20a.h"
#include "platform_gk20a.h"
#include "ioctl_as.h"
#include "os_linux.h"

static u32 gk20a_as_translate_as_alloc_space_flags(struct gk20a *g, u32 flags)
{
	u32 core_flags = 0;

	if (flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET)
		core_flags |= NVGPU_VM_AREA_ALLOC_FIXED_OFFSET;
	if (flags & NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE)
		core_flags |= NVGPU_VM_AREA_ALLOC_SPARSE;

	return core_flags;
}

static int gk20a_as_ioctl_bind_channel(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_bind_channel_args *args)
{
	int err = 0;
	struct channel_gk20a *ch;
	struct gk20a *g = gk20a_from_vm(as_share->vm);

	nvgpu_log_fn(g, " ");

	ch = gk20a_get_channel_from_file(args->channel_fd);
	if (!ch)
		return -EINVAL;

	if (gk20a_channel_as_bound(ch)) {
		err = -EINVAL;
		goto out;
	}

	/* this will set channel_gk20a->vm */
	err = ch->g->ops.mm.vm_bind_channel(as_share->vm, ch);

out:
	gk20a_channel_put(ch);
	return err;
}

static int gk20a_as_ioctl_alloc_space(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_alloc_space_args *args)
{
	struct gk20a *g = gk20a_from_vm(as_share->vm);

	nvgpu_log_fn(g, " ");
	return nvgpu_vm_area_alloc(as_share->vm, args->pages, args->page_size,
				   &args->o_a.offset,
				   gk20a_as_translate_as_alloc_space_flags(g,
								args->flags));
}

static int gk20a_as_ioctl_free_space(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_free_space_args *args)
{
	struct gk20a *g = gk20a_from_vm(as_share->vm);

	nvgpu_log_fn(g, " ");
	return nvgpu_vm_area_free(as_share->vm, args->offset);
}

static int gk20a_as_ioctl_map_buffer_ex(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_map_buffer_ex_args *args)
{
	struct gk20a *g = gk20a_from_vm(as_share->vm);

	nvgpu_log_fn(g, " ");

	/* unsupported, direct kind control must be used */
	if (!(args->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)) {
		struct gk20a *g = as_share->vm->mm->g;
		nvgpu_log_info(g, "Direct kind control must be requested");
		return -EINVAL;
	}

	return nvgpu_vm_map_buffer(as_share->vm, args->dmabuf_fd,
				   &args->offset, args->flags,
				   args->compr_kind,
				   args->incompr_kind,
				   args->buffer_offset,
				   args->mapping_size,
				   NULL);
}

static int gk20a_as_ioctl_unmap_buffer(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_unmap_buffer_args *args)
{
	struct gk20a *g = gk20a_from_vm(as_share->vm);

	nvgpu_log_fn(g, " ");

	nvgpu_vm_unmap(as_share->vm, args->offset, NULL);

	return 0;
}

static int gk20a_as_ioctl_map_buffer_batch(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_map_buffer_batch_args *args)
{
	struct gk20a *g = gk20a_from_vm(as_share->vm);
	u32 i;
	int err = 0;

	struct nvgpu_as_unmap_buffer_args __user *user_unmap_args =
		(struct nvgpu_as_unmap_buffer_args __user *)(uintptr_t)
		args->unmaps;
	struct nvgpu_as_map_buffer_ex_args __user *user_map_args =
		(struct nvgpu_as_map_buffer_ex_args __user *)(uintptr_t)
		args->maps;

	struct vm_gk20a_mapping_batch batch;

	nvgpu_log_fn(g, " ");

	if (args->num_unmaps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT ||
	    args->num_maps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT)
		return -EINVAL;

	nvgpu_vm_mapping_batch_start(&batch);

	for (i = 0; i < args->num_unmaps; ++i) {
		struct nvgpu_as_unmap_buffer_args unmap_args;

		if (copy_from_user(&unmap_args, &user_unmap_args[i],
				   sizeof(unmap_args))) {
			err = -EFAULT;
			break;
		}

		nvgpu_vm_unmap(as_share->vm, unmap_args.offset, &batch);
	}

	if (err) {
		nvgpu_vm_mapping_batch_finish(as_share->vm, &batch);

		args->num_unmaps = i;
		args->num_maps = 0;
		return err;
	}

	for (i = 0; i < args->num_maps; ++i) {
		s16 compressible_kind;
		s16 incompressible_kind;

		struct nvgpu_as_map_buffer_ex_args map_args;
		memset(&map_args, 0, sizeof(map_args));

		if (copy_from_user(&map_args, &user_map_args[i],
				   sizeof(map_args))) {
			err = -EFAULT;
			break;
		}

		if (map_args.flags &
		    NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL) {
			compressible_kind = map_args.compr_kind;
			incompressible_kind = map_args.incompr_kind;
		} else {
			/* direct kind control must be used */
			err = -EINVAL;
			break;
		}

		err = nvgpu_vm_map_buffer(
			as_share->vm, map_args.dmabuf_fd,
			&map_args.offset, map_args.flags,
			compressible_kind, incompressible_kind,
			map_args.buffer_offset,
			map_args.mapping_size,
			&batch);
		if (err)
			break;
	}

	nvgpu_vm_mapping_batch_finish(as_share->vm, &batch);

	if (err)
		args->num_maps = i;
	/* note: args->num_unmaps will be unmodified, which is ok
	 * since all unmaps are done */

	return err;
}

static int gk20a_as_ioctl_get_va_regions(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_get_va_regions_args *args)
{
	unsigned int i;
	unsigned int write_entries;
	struct nvgpu_as_va_region __user *user_region_ptr;
	struct vm_gk20a *vm = as_share->vm;
	struct gk20a *g = gk20a_from_vm(vm);
	unsigned int page_sizes = gmmu_page_size_kernel;

	nvgpu_log_fn(g, " ");

	if (!vm->big_pages)
		page_sizes--;

	write_entries = args->buf_size / sizeof(struct nvgpu_as_va_region);
	if (write_entries > page_sizes)
		write_entries = page_sizes;

	user_region_ptr =
		(struct nvgpu_as_va_region __user *)(uintptr_t)args->buf_addr;

	for (i = 0; i < write_entries; ++i) {
		struct nvgpu_as_va_region region;
		struct nvgpu_allocator *vma = vm->vma[i];

		memset(&region, 0, sizeof(struct nvgpu_as_va_region));

		region.page_size = vm->gmmu_page_sizes[i];
		region.offset = nvgpu_alloc_base(vma);
		/* No __aeabi_uldivmod() on some platforms... */
		region.pages = (nvgpu_alloc_end(vma) -
			nvgpu_alloc_base(vma)) >> ilog2(region.page_size);

		if (copy_to_user(user_region_ptr + i, &region, sizeof(region)))
			return -EFAULT;
	}

	args->buf_size =
		page_sizes * sizeof(struct nvgpu_as_va_region);

	return 0;
}

static int nvgpu_as_ioctl_get_sync_ro_map(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_get_sync_ro_map_args *args)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
	struct vm_gk20a *vm = as_share->vm;
	struct gk20a *g = gk20a_from_vm(vm);
	u64 base_gpuva;
	u32 sync_size;
	int err = 0;

	if (!g->ops.fifo.get_sync_ro_map)
		return -EINVAL;

	if (!gk20a_platform_has_syncpoints(g))
		return -EINVAL;

	err = g->ops.fifo.get_sync_ro_map(vm, &base_gpuva, &sync_size);
	if (err)
		return err;

	args->base_gpuva = base_gpuva;
	args->sync_size = sync_size;

	return err;
#else
	return -EINVAL;
#endif
}

int gk20a_as_dev_open(struct inode *inode, struct file *filp)
{
	struct nvgpu_os_linux *l;
	struct gk20a_as_share *as_share;
	struct gk20a *g;
	int err;

	l = container_of(inode->i_cdev, struct nvgpu_os_linux, as_dev.cdev);
	g = &l->g;

	nvgpu_log_fn(g, " ");

	err = gk20a_as_alloc_share(g, 0, 0, &as_share);
	if (err) {
		nvgpu_log_fn(g, "failed to alloc share");
		return err;
	}

	filp->private_data = as_share;
	return 0;
}

int gk20a_as_dev_release(struct inode *inode, struct file *filp)
{
	struct gk20a_as_share *as_share = filp->private_data;

	if (!as_share)
		return 0;

	return gk20a_as_release_share(as_share);
}

long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct gk20a_as_share *as_share = filp->private_data;
	struct gk20a *g = gk20a_from_as(as_share->as);

	u8 buf[NVGPU_AS_IOCTL_MAX_ARG_SIZE];

	nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));

	if ((_IOC_TYPE(cmd) != NVGPU_AS_IOCTL_MAGIC) ||
	    (_IOC_NR(cmd) == 0) ||
	    (_IOC_NR(cmd) > NVGPU_AS_IOCTL_LAST) ||
	    (_IOC_SIZE(cmd) > NVGPU_AS_IOCTL_MAX_ARG_SIZE))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	err = gk20a_busy(g);
	if (err)
		return err;

	switch (cmd) {
	case NVGPU_AS_IOCTL_BIND_CHANNEL:
		trace_gk20a_as_ioctl_bind_channel(g->name);
		err = gk20a_as_ioctl_bind_channel(as_share,
				(struct nvgpu_as_bind_channel_args *)buf);
		break;
	case NVGPU32_AS_IOCTL_ALLOC_SPACE:
	{
		struct nvgpu32_as_alloc_space_args *args32 =
			(struct nvgpu32_as_alloc_space_args *)buf;
		struct nvgpu_as_alloc_space_args args;

		args.pages = args32->pages;
		args.page_size = args32->page_size;
		args.flags = args32->flags;
		args.o_a.offset = args32->o_a.offset;
		trace_gk20a_as_ioctl_alloc_space(g->name);
		err = gk20a_as_ioctl_alloc_space(as_share, &args);
		args32->o_a.offset = args.o_a.offset;
		break;
	}
	case NVGPU_AS_IOCTL_ALLOC_SPACE:
		trace_gk20a_as_ioctl_alloc_space(g->name);
		err = gk20a_as_ioctl_alloc_space(as_share,
				(struct nvgpu_as_alloc_space_args *)buf);
		break;
	case NVGPU_AS_IOCTL_FREE_SPACE:
		trace_gk20a_as_ioctl_free_space(g->name);
		err = gk20a_as_ioctl_free_space(as_share,
				(struct nvgpu_as_free_space_args *)buf);
		break;
	case NVGPU_AS_IOCTL_MAP_BUFFER_EX:
		trace_gk20a_as_ioctl_map_buffer(g->name);
		err = gk20a_as_ioctl_map_buffer_ex(as_share,
				(struct nvgpu_as_map_buffer_ex_args *)buf);
		break;
	case NVGPU_AS_IOCTL_UNMAP_BUFFER:
		trace_gk20a_as_ioctl_unmap_buffer(g->name);
		err = gk20a_as_ioctl_unmap_buffer(as_share,
				(struct nvgpu_as_unmap_buffer_args *)buf);
		break;
	case NVGPU_AS_IOCTL_GET_VA_REGIONS:
		trace_gk20a_as_ioctl_get_va_regions(g->name);
		err = gk20a_as_ioctl_get_va_regions(as_share,
				(struct nvgpu_as_get_va_regions_args *)buf);
		break;
	case NVGPU_AS_IOCTL_MAP_BUFFER_BATCH:
		err = gk20a_as_ioctl_map_buffer_batch(as_share,
				(struct nvgpu_as_map_buffer_batch_args *)buf);
		break;
	case NVGPU_AS_IOCTL_GET_SYNC_RO_MAP:
		err = nvgpu_as_ioctl_get_sync_ro_map(as_share,
				(struct nvgpu_as_get_sync_ro_map_args *)buf);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	gk20a_idle(g);

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
		if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
			err = -EFAULT;

	return err;
}
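
For context, the interface this file implements (and which moves unchanged to os/linux) is driven from userspace by opening the address-space device node, so that gk20a_as_dev_open() allocates a gk20a_as_share, and then issuing the ioctls that gk20a_as_dev_ioctl() dispatches. The following is a minimal, hypothetical sketch of that flow; the device node path /dev/nvhost-as-gpu and the already-open channel fd are assumptions, not part of this change:

/* Hypothetical userspace sketch: exercises NVGPU_AS_IOCTL_BIND_CHANNEL,
 * which gk20a_as_dev_ioctl() routes to gk20a_as_ioctl_bind_channel().
 * Assumes installed nvgpu uapi headers and a channel fd obtained elsewhere. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvgpu.h>	/* NVGPU_AS_IOCTL_*, struct nvgpu_as_*_args */

int bind_channel_to_as(int channel_fd)
{
	/* Each open() creates one address-space share (gk20a_as_dev_open). */
	int as_fd = open("/dev/nvhost-as-gpu", O_RDWR);	/* node name assumed */
	if (as_fd < 0) {
		perror("open as device");
		return -1;
	}

	struct nvgpu_as_bind_channel_args args = {
		.channel_fd = channel_fd,
	};

	/* Fails with EINVAL if the channel is already bound to a VM,
	 * mirroring the gk20a_channel_as_bound() check above. */
	if (ioctl(as_fd, NVGPU_AS_IOCTL_BIND_CHANNEL, &args) < 0) {
		perror("NVGPU_AS_IOCTL_BIND_CHANNEL");
		close(as_fd);
		return -1;
	}

	return as_fd;	/* caller keeps the share open while the channel lives */
}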