Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_as.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_as.c | 377
1 file changed, 377 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
new file mode 100644
index 00000000..ff9787db
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
@@ -0,0 +1,377 @@
/*
 * GK20A Address Spaces
 *
 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/cdev.h>
#include <linux/uaccess.h>

#include <trace/events/gk20a.h>

#include <uapi/linux/nvgpu.h>

#include "gk20a/gk20a.h"

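/*
 * Resolve the channel behind args->channel_fd and bind this address
 * space to it through the chip-specific vm_bind_channel() op, which
 * sets channel_gk20a->vm. Binding an already-bound channel fails.
 */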
static int gk20a_as_ioctl_bind_channel(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_bind_channel_args *args)
{
	int err = 0;
	struct channel_gk20a *ch;

	gk20a_dbg_fn("");

	ch = gk20a_get_channel_from_file(args->channel_fd);
	if (!ch || gk20a_channel_as_bound(ch))
		return -EINVAL;

	/* this will set channel_gk20a->vm */
	err = ch->g->ops.mm.vm_bind_channel(as_share, ch);

	return err;
}

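/*
 * Space alloc/free are thin pass-throughs; the real work happens in the
 * common gk20a_vm_alloc_space()/gk20a_vm_free_space() code.
 */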
static int gk20a_as_ioctl_alloc_space(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_alloc_space_args *args)
{
	gk20a_dbg_fn("");
	return gk20a_vm_alloc_space(as_share, args);
}

static int gk20a_as_ioctl_free_space(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_free_space_args *args)
{
	gk20a_dbg_fn("");
	return gk20a_vm_free_space(as_share, args);
}

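/*
 * MAP_BUFFER_EX lets userspace choose kind, buffer offset and mapping
 * size; the plain MAP_BUFFER variant below maps the whole dmabuf with
 * NV_KIND_DEFAULT.
 */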
static int gk20a_as_ioctl_map_buffer_ex(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_map_buffer_ex_args *args)
{
	gk20a_dbg_fn("");

	return gk20a_vm_map_buffer(as_share->vm, args->dmabuf_fd,
				   &args->offset, args->flags,
				   args->kind,
				   args->buffer_offset,
				   args->mapping_size,
				   NULL);
}

static int gk20a_as_ioctl_map_buffer(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_map_buffer_args *args)
{
	gk20a_dbg_fn("");
	return gk20a_vm_map_buffer(as_share->vm, args->dmabuf_fd,
				   &args->o_a.offset,
				   args->flags, NV_KIND_DEFAULT,
				   0, 0, NULL);
	/* args->o_a.offset will be set on success */
}

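/* Unmap a single buffer previously mapped at args->offset. */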
static int gk20a_as_ioctl_unmap_buffer(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_unmap_buffer_args *args)
{
	gk20a_dbg_fn("");
	return gk20a_vm_unmap_buffer(as_share->vm, args->offset, NULL);
}

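/*
 * Batched unmap + map in one ioctl: all unmaps run first, then all
 * maps, bracketed by gk20a_vm_mapping_batch_start()/_finish() so that
 * per-operation flushes can be coalesced. On error, args->num_unmaps
 * and args->num_maps are rewritten to the number of entries that
 * completed, so userspace can tell how far the batch got.
 */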
static int gk20a_as_ioctl_map_buffer_batch(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_map_buffer_batch_args *args)
{
	struct gk20a *g = as_share->vm->mm->g;
	u32 i;
	int err = 0;

	struct nvgpu_as_unmap_buffer_args __user *user_unmap_args =
		(struct nvgpu_as_unmap_buffer_args __user *)(uintptr_t)
		args->unmaps;
	struct nvgpu_as_map_buffer_ex_args __user *user_map_args =
		(struct nvgpu_as_map_buffer_ex_args __user *)(uintptr_t)
		args->maps;

	struct vm_gk20a_mapping_batch batch;

	gk20a_dbg_fn("");

	if (args->num_unmaps > g->gpu_characteristics.map_buffer_batch_limit ||
	    args->num_maps > g->gpu_characteristics.map_buffer_batch_limit)
		return -EINVAL;

	gk20a_vm_mapping_batch_start(&batch);

	for (i = 0; i < args->num_unmaps; ++i) {
		struct nvgpu_as_unmap_buffer_args unmap_args;

		if (copy_from_user(&unmap_args, &user_unmap_args[i],
				   sizeof(unmap_args))) {
			err = -EFAULT;
			break;
		}

		err = gk20a_vm_unmap_buffer(as_share->vm, unmap_args.offset,
					    &batch);
		if (err)
			break;
	}

	if (err) {
		gk20a_vm_mapping_batch_finish(as_share->vm, &batch);

		args->num_unmaps = i;
		args->num_maps = 0;
		return err;
	}

	for (i = 0; i < args->num_maps; ++i) {
		struct nvgpu_as_map_buffer_ex_args map_args;

		memset(&map_args, 0, sizeof(map_args));

		if (copy_from_user(&map_args, &user_map_args[i],
				   sizeof(map_args))) {
			err = -EFAULT;
			break;
		}

		err = gk20a_vm_map_buffer(
			as_share->vm, map_args.dmabuf_fd,
			&map_args.offset, map_args.flags,
			map_args.kind,
			map_args.buffer_offset,
			map_args.mapping_size,
			&batch);
		if (err)
			break;
	}

	gk20a_vm_mapping_batch_finish(as_share->vm, &batch);

	if (err)
		args->num_maps = i;
	/*
	 * Note: args->num_unmaps is left unmodified, which is OK since
	 * all unmaps are done by this point.
	 */

	return err;
}

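/*
 * Fill args->buf_addr with one nvgpu_as_va_region per supported GMMU
 * page size (the big-page region is omitted when the VM was created
 * without big pages), and report in args->buf_size how large the full
 * set of entries is.
 */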
static int gk20a_as_ioctl_get_va_regions(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_get_va_regions_args *args)
{
	unsigned int i;
	unsigned int write_entries;
	struct nvgpu_as_va_region __user *user_region_ptr;
	struct vm_gk20a *vm = as_share->vm;
	unsigned int page_sizes = gmmu_page_size_kernel;

	gk20a_dbg_fn("");

	if (!vm->big_pages)
		page_sizes--;

	write_entries = args->buf_size / sizeof(struct nvgpu_as_va_region);
	if (write_entries > page_sizes)
		write_entries = page_sizes;

	user_region_ptr =
		(struct nvgpu_as_va_region __user *)(uintptr_t)args->buf_addr;

	for (i = 0; i < write_entries; ++i) {
		struct nvgpu_as_va_region region;
		struct nvgpu_allocator *vma = vm->vma[i];

		memset(&region, 0, sizeof(struct nvgpu_as_va_region));

		region.page_size = vm->gmmu_page_sizes[i];
		region.offset = nvgpu_alloc_base(vma);
		/* No __aeabi_uldivmod() on some platforms... */
		region.pages = (nvgpu_alloc_end(vma) -
			nvgpu_alloc_base(vma)) >> ilog2(region.page_size);

		if (copy_to_user(user_region_ptr + i, &region, sizeof(region)))
			return -EFAULT;
	}

	args->buf_size =
		page_sizes * sizeof(struct nvgpu_as_va_region);

	return 0;
}

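/*
 * Query compbits (compression tag) state for an existing mapping:
 * window size, ctaglines and flags are written back into args.
 */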
static int gk20a_as_ioctl_get_buffer_compbits_info(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_get_buffer_compbits_info_args *args)
{
	gk20a_dbg_fn("");
	return gk20a_vm_get_compbits_info(as_share->vm,
					  args->mapping_gva,
					  &args->compbits_win_size,
					  &args->compbits_win_ctagline,
					  &args->mapping_ctagline,
					  &args->flags);
}

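/* Map the compbits window for an existing mapping into the GPU VA space. */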
static int gk20a_as_ioctl_map_buffer_compbits(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_map_buffer_compbits_args *args)
{
	gk20a_dbg_fn("");
	return gk20a_vm_map_compbits(as_share->vm,
				     args->mapping_gva,
				     &args->compbits_win_gva,
				     &args->mapping_iova,
				     args->flags);
}

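/*
 * open() on the AS device node allocates a fresh address space share
 * and stashes it in filp->private_data for the other file ops.
 */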
int gk20a_as_dev_open(struct inode *inode, struct file *filp)
{
	struct gk20a_as_share *as_share;
	struct gk20a *g;
	int err;

	gk20a_dbg_fn("");

	g = container_of(inode->i_cdev, struct gk20a, as_dev.cdev);

	err = gk20a_as_alloc_share(g, 0, 0, &as_share);
	if (err) {
		gk20a_dbg_fn("failed to alloc share");
		return err;
	}

	filp->private_data = as_share;
	return 0;
}

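/* Drop the AS share taken at open time, if one was allocated. */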
int gk20a_as_dev_release(struct inode *inode, struct file *filp)
{
	struct gk20a_as_share *as_share = filp->private_data;

	gk20a_dbg_fn("");

	if (!as_share)
		return 0;

	return gk20a_as_release_share(as_share);
}

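/*
 * Common entry point for all AS ioctls: validate the command number and
 * size, copy the argument struct into a local buffer, dispatch under a
 * gk20a_busy() power reference, and copy results back to userspace for
 * _IOC_READ commands. NVGPU32_AS_IOCTL_ALLOC_SPACE is the 32-bit compat
 * layout, widened into the native struct before calling the common
 * handler.
 */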
long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct gk20a_as_share *as_share = filp->private_data;
	struct gk20a *g = gk20a_from_as(as_share->as);

	u8 buf[NVGPU_AS_IOCTL_MAX_ARG_SIZE];

	if ((_IOC_TYPE(cmd) != NVGPU_AS_IOCTL_MAGIC) ||
	    (_IOC_NR(cmd) == 0) ||
	    (_IOC_NR(cmd) > NVGPU_AS_IOCTL_LAST) ||
	    (_IOC_SIZE(cmd) > NVGPU_AS_IOCTL_MAX_ARG_SIZE))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	err = gk20a_busy(g);
	if (err)
		return err;

	switch (cmd) {
	case NVGPU_AS_IOCTL_BIND_CHANNEL:
		trace_gk20a_as_ioctl_bind_channel(g->name);
		err = gk20a_as_ioctl_bind_channel(as_share,
			(struct nvgpu_as_bind_channel_args *)buf);
		break;
	case NVGPU32_AS_IOCTL_ALLOC_SPACE:
	{
		struct nvgpu32_as_alloc_space_args *args32 =
			(struct nvgpu32_as_alloc_space_args *)buf;
		struct nvgpu_as_alloc_space_args args;

		args.pages = args32->pages;
		args.page_size = args32->page_size;
		args.flags = args32->flags;
		args.o_a.offset = args32->o_a.offset;
		trace_gk20a_as_ioctl_alloc_space(g->name);
		err = gk20a_as_ioctl_alloc_space(as_share, &args);
		args32->o_a.offset = args.o_a.offset;
		break;
	}
	case NVGPU_AS_IOCTL_ALLOC_SPACE:
		trace_gk20a_as_ioctl_alloc_space(g->name);
		err = gk20a_as_ioctl_alloc_space(as_share,
			(struct nvgpu_as_alloc_space_args *)buf);
		break;
	case NVGPU_AS_IOCTL_FREE_SPACE:
		trace_gk20a_as_ioctl_free_space(g->name);
		err = gk20a_as_ioctl_free_space(as_share,
			(struct nvgpu_as_free_space_args *)buf);
		break;
	case NVGPU_AS_IOCTL_MAP_BUFFER:
		trace_gk20a_as_ioctl_map_buffer(g->name);
		err = gk20a_as_ioctl_map_buffer(as_share,
			(struct nvgpu_as_map_buffer_args *)buf);
		break;
	case NVGPU_AS_IOCTL_MAP_BUFFER_EX:
		trace_gk20a_as_ioctl_map_buffer(g->name);
		err = gk20a_as_ioctl_map_buffer_ex(as_share,
			(struct nvgpu_as_map_buffer_ex_args *)buf);
		break;
	case NVGPU_AS_IOCTL_UNMAP_BUFFER:
		trace_gk20a_as_ioctl_unmap_buffer(g->name);
		err = gk20a_as_ioctl_unmap_buffer(as_share,
			(struct nvgpu_as_unmap_buffer_args *)buf);
		break;
	case NVGPU_AS_IOCTL_GET_VA_REGIONS:
		trace_gk20a_as_ioctl_get_va_regions(g->name);
		err = gk20a_as_ioctl_get_va_regions(as_share,
			(struct nvgpu_as_get_va_regions_args *)buf);
		break;
	case NVGPU_AS_IOCTL_GET_BUFFER_COMPBITS_INFO:
		err = gk20a_as_ioctl_get_buffer_compbits_info(as_share,
			(struct nvgpu_as_get_buffer_compbits_info_args *)buf);
		break;
	case NVGPU_AS_IOCTL_MAP_BUFFER_COMPBITS:
		err = gk20a_as_ioctl_map_buffer_compbits(as_share,
			(struct nvgpu_as_map_buffer_compbits_args *)buf);
		break;
	case NVGPU_AS_IOCTL_MAP_BUFFER_BATCH:
		err = gk20a_as_ioctl_map_buffer_batch(as_share,
			(struct nvgpu_as_map_buffer_batch_args *)buf);
		break;
	default:
		dev_dbg(dev_from_gk20a(g), "unrecognized as ioctl: 0x%x", cmd);
		err = -ENOTTY;
		break;
	}

	gk20a_idle(g);

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
		if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
			err = -EFAULT;

	return err;
}