Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_as.c')
 -rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_as.c | 366
 1 file changed, 366 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
new file mode 100644
index 00000000..848fee04
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
@@ -0,0 +1,366 @@
/*
 * GK20A Address Spaces
 *
 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/fs.h>

#include <trace/events/gk20a.h>

#include <uapi/linux/nvgpu.h>

#include <nvgpu/gmmu.h>
#include <nvgpu/vm_area.h>
#include <nvgpu/log2.h>

#include <nvgpu/linux/vm.h>

#include "gk20a/gk20a.h"
#include "platform_gk20a.h"
#include "ioctl_as.h"
#include "os_linux.h"

static int gk20a_as_ioctl_bind_channel(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_bind_channel_args *args)
{
	int err = 0;
	struct channel_gk20a *ch;

	gk20a_dbg_fn("");

	ch = gk20a_get_channel_from_file(args->channel_fd);
	if (!ch)
		return -EINVAL;

	if (gk20a_channel_as_bound(ch)) {
		err = -EINVAL;
		goto out;
	}

	/* this will set channel_gk20a->vm */
	err = ch->g->ops.mm.vm_bind_channel(as_share, ch);

out:
	gk20a_channel_put(ch);
	return err;
}

static int gk20a_as_ioctl_alloc_space(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_alloc_space_args *args)
{
	gk20a_dbg_fn("");
	return nvgpu_vm_area_alloc(as_share->vm, args->pages, args->page_size,
				   &args->o_a.offset, args->flags);
}

static int gk20a_as_ioctl_free_space(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_free_space_args *args)
{
	gk20a_dbg_fn("");
	return nvgpu_vm_area_free(as_share->vm, args->offset);
}

static int gk20a_as_ioctl_map_buffer_ex(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_map_buffer_ex_args *args)
{
	gk20a_dbg_fn("");

	/* unsupported, direct kind control must be used */
	if (!(args->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)) {
		struct gk20a *g = as_share->vm->mm->g;

		nvgpu_log_info(g, "Direct kind control must be requested");
		return -EINVAL;
	}

	return nvgpu_vm_map_buffer(as_share->vm, args->dmabuf_fd,
				   &args->offset, args->flags,
				   args->compr_kind,
				   args->incompr_kind,
				   args->buffer_offset,
				   args->mapping_size,
				   NULL);
}

static int gk20a_as_ioctl_unmap_buffer(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_unmap_buffer_args *args)
{
	gk20a_dbg_fn("");

	nvgpu_vm_unmap(as_share->vm, args->offset, NULL);

	return 0;
}

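/*
 * Handle NVGPU_AS_IOCTL_MAP_BUFFER_BATCH: copy each unmap and map argument
 * struct in from user space and apply the unmaps first, then the maps, all
 * attached to a single vm_gk20a_mapping_batch so the finishing work is done
 * once in nvgpu_vm_mapping_batch_finish(). If an operation fails, num_unmaps
 * and num_maps are updated to tell user space how far the batch progressed.
 */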
static int gk20a_as_ioctl_map_buffer_batch(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_map_buffer_batch_args *args)
{
	u32 i;
	int err = 0;

	struct nvgpu_as_unmap_buffer_args __user *user_unmap_args =
		(struct nvgpu_as_unmap_buffer_args __user *)(uintptr_t)
		args->unmaps;
	struct nvgpu_as_map_buffer_ex_args __user *user_map_args =
		(struct nvgpu_as_map_buffer_ex_args __user *)(uintptr_t)
		args->maps;

	struct vm_gk20a_mapping_batch batch;

	gk20a_dbg_fn("");

	if (args->num_unmaps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT ||
	    args->num_maps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT)
		return -EINVAL;

	nvgpu_vm_mapping_batch_start(&batch);

	for (i = 0; i < args->num_unmaps; ++i) {
		struct nvgpu_as_unmap_buffer_args unmap_args;

		if (copy_from_user(&unmap_args, &user_unmap_args[i],
				   sizeof(unmap_args))) {
			err = -EFAULT;
			break;
		}

		nvgpu_vm_unmap(as_share->vm, unmap_args.offset, &batch);
	}

	if (err) {
		nvgpu_vm_mapping_batch_finish(as_share->vm, &batch);

		args->num_unmaps = i;
		args->num_maps = 0;
		return err;
	}

	for (i = 0; i < args->num_maps; ++i) {
		s16 compressible_kind;
		s16 incompressible_kind;

		struct nvgpu_as_map_buffer_ex_args map_args;

		memset(&map_args, 0, sizeof(map_args));

		if (copy_from_user(&map_args, &user_map_args[i],
				   sizeof(map_args))) {
			err = -EFAULT;
			break;
		}

		if (map_args.flags &
		    NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL) {
			compressible_kind = map_args.compr_kind;
			incompressible_kind = map_args.incompr_kind;
		} else {
			/* direct kind control must be used */
			err = -EINVAL;
			break;
		}

		err = nvgpu_vm_map_buffer(
			as_share->vm, map_args.dmabuf_fd,
			&map_args.offset, map_args.flags,
			compressible_kind, incompressible_kind,
			map_args.buffer_offset,
			map_args.mapping_size,
			&batch);
		if (err)
			break;
	}

	nvgpu_vm_mapping_batch_finish(as_share->vm, &batch);

	if (err)
		args->num_maps = i;
	/* note: args->num_unmaps will be unmodified, which is ok
	 * since all unmaps are done */

	return err;
}

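/*
 * Handle NVGPU_AS_IOCTL_GET_VA_REGIONS: write one nvgpu_as_va_region
 * (page size, base offset, length in pages) per GMMU page size the VM
 * exposes, skipping the big-page region when the VM has big pages
 * disabled, and report in buf_size how much space a full set needs.
 */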
static int gk20a_as_ioctl_get_va_regions(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_get_va_regions_args *args)
{
	unsigned int i;
	unsigned int write_entries;
	struct nvgpu_as_va_region __user *user_region_ptr;
	struct vm_gk20a *vm = as_share->vm;
	unsigned int page_sizes = gmmu_page_size_kernel;

	gk20a_dbg_fn("");

	if (!vm->big_pages)
		page_sizes--;

	write_entries = args->buf_size / sizeof(struct nvgpu_as_va_region);
	if (write_entries > page_sizes)
		write_entries = page_sizes;

	user_region_ptr =
		(struct nvgpu_as_va_region __user *)(uintptr_t)args->buf_addr;

	for (i = 0; i < write_entries; ++i) {
		struct nvgpu_as_va_region region;
		struct nvgpu_allocator *vma = vm->vma[i];

		memset(&region, 0, sizeof(struct nvgpu_as_va_region));

		region.page_size = vm->gmmu_page_sizes[i];
		region.offset = nvgpu_alloc_base(vma);
		/* No __aeabi_uldivmod() on some platforms... */
		region.pages = (nvgpu_alloc_end(vma) -
			nvgpu_alloc_base(vma)) >> ilog2(region.page_size);

		if (copy_to_user(user_region_ptr + i, &region, sizeof(region)))
			return -EFAULT;
	}

	args->buf_size =
		page_sizes * sizeof(struct nvgpu_as_va_region);

	return 0;
}

int gk20a_as_dev_open(struct inode *inode, struct file *filp)
{
	struct nvgpu_os_linux *l;
	struct gk20a_as_share *as_share;
	struct gk20a *g;
	int err;

	gk20a_dbg_fn("");

	l = container_of(inode->i_cdev, struct nvgpu_os_linux, as_dev.cdev);
	g = &l->g;

	err = gk20a_as_alloc_share(g, 0, 0, &as_share);
	if (err) {
		gk20a_dbg_fn("failed to alloc share");
		return err;
	}

	filp->private_data = as_share;
	return 0;
}

int gk20a_as_dev_release(struct inode *inode, struct file *filp)
{
	struct gk20a_as_share *as_share = filp->private_data;

	gk20a_dbg_fn("");

	if (!as_share)
		return 0;

	return gk20a_as_release_share(as_share);
}

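/*
 * Dispatch AS ioctls: validate the command, copy the argument block in for
 * _IOC_WRITE commands, run the handler with the GPU powered on via
 * gk20a_busy()/gk20a_idle(), and copy the (possibly updated) arguments back
 * out for _IOC_READ commands when the handler succeeded.
 */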
long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct gk20a_as_share *as_share = filp->private_data;
	struct gk20a *g = gk20a_from_as(as_share->as);

	u8 buf[NVGPU_AS_IOCTL_MAX_ARG_SIZE];

	gk20a_dbg_fn("start %d", _IOC_NR(cmd));

	if ((_IOC_TYPE(cmd) != NVGPU_AS_IOCTL_MAGIC) ||
	    (_IOC_NR(cmd) == 0) ||
	    (_IOC_NR(cmd) > NVGPU_AS_IOCTL_LAST) ||
	    (_IOC_SIZE(cmd) > NVGPU_AS_IOCTL_MAX_ARG_SIZE))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	err = gk20a_busy(g);
	if (err)
		return err;

	switch (cmd) {
	case NVGPU_AS_IOCTL_BIND_CHANNEL:
		trace_gk20a_as_ioctl_bind_channel(g->name);
		err = gk20a_as_ioctl_bind_channel(as_share,
				(struct nvgpu_as_bind_channel_args *)buf);
		break;
	case NVGPU32_AS_IOCTL_ALLOC_SPACE:
	{
		struct nvgpu32_as_alloc_space_args *args32 =
			(struct nvgpu32_as_alloc_space_args *)buf;
		struct nvgpu_as_alloc_space_args args;

		args.pages = args32->pages;
		args.page_size = args32->page_size;
		args.flags = args32->flags;
		args.o_a.offset = args32->o_a.offset;
		trace_gk20a_as_ioctl_alloc_space(g->name);
		err = gk20a_as_ioctl_alloc_space(as_share, &args);
		args32->o_a.offset = args.o_a.offset;
		break;
	}
	case NVGPU_AS_IOCTL_ALLOC_SPACE:
		trace_gk20a_as_ioctl_alloc_space(g->name);
		err = gk20a_as_ioctl_alloc_space(as_share,
				(struct nvgpu_as_alloc_space_args *)buf);
		break;
	case NVGPU_AS_IOCTL_FREE_SPACE:
		trace_gk20a_as_ioctl_free_space(g->name);
		err = gk20a_as_ioctl_free_space(as_share,
				(struct nvgpu_as_free_space_args *)buf);
		break;
	case NVGPU_AS_IOCTL_MAP_BUFFER_EX:
		trace_gk20a_as_ioctl_map_buffer(g->name);
		err = gk20a_as_ioctl_map_buffer_ex(as_share,
				(struct nvgpu_as_map_buffer_ex_args *)buf);
		break;
	case NVGPU_AS_IOCTL_UNMAP_BUFFER:
		trace_gk20a_as_ioctl_unmap_buffer(g->name);
		err = gk20a_as_ioctl_unmap_buffer(as_share,
				(struct nvgpu_as_unmap_buffer_args *)buf);
		break;
	case NVGPU_AS_IOCTL_GET_VA_REGIONS:
		trace_gk20a_as_ioctl_get_va_regions(g->name);
		err = gk20a_as_ioctl_get_va_regions(as_share,
				(struct nvgpu_as_get_va_regions_args *)buf);
		break;
	case NVGPU_AS_IOCTL_MAP_BUFFER_BATCH:
		err = gk20a_as_ioctl_map_buffer_batch(as_share,
				(struct nvgpu_as_map_buffer_batch_args *)buf);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	gk20a_idle(g);

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
		if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
			err = -EFAULT;

	return err;
}
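
For reference, a minimal user-space sketch of the ALLOC_SPACE/FREE_SPACE flow handled above is shown below. It is not part of the patch: it assumes the nvgpu uapi header is visible as <linux/nvgpu.h> and that the address-space device node is /dev/nvhost-as-gpu; both the header path and the node name are platform-specific, so treat this purely as an illustration of the ioctl calling convention.

/* User-space sketch: reserve a GPU VA region through the AS device and
 * free it again. Node name and header path are assumptions (see above). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

int main(void)
{
	struct nvgpu_as_alloc_space_args alloc = {
		.pages = 256,		/* reserve 256 pages...            */
		.page_size = 4096,	/* ...of a page size the VM offers */
		.flags = 0,		/* let the driver pick the offset  */
	};
	int fd = open("/dev/nvhost-as-gpu", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;

	if (ioctl(fd, NVGPU_AS_IOCTL_ALLOC_SPACE, &alloc) == 0) {
		/* only the offset is consumed by the free handler above;
		 * remaining fields are left zeroed */
		struct nvgpu_as_free_space_args free_args = {
			.offset = alloc.o_a.offset,
		};

		printf("reserved GPU VA at 0x%llx\n",
		       (unsigned long long)alloc.o_a.offset);
		ioctl(fd, NVGPU_AS_IOCTL_FREE_SPACE, &free_args);
	}

	close(fd);
	return 0;
}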