Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/as_gk20a.c  461
-rw-r--r--  drivers/gpu/nvgpu/gk20a/as_gk20a.h   49
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h       8
3 files changed, 7 insertions, 511 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/as_gk20a.c b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
deleted file mode 100644
index 5acc626b..00000000
--- a/drivers/gpu/nvgpu/gk20a/as_gk20a.c
+++ /dev/null
@@ -1,461 +0,0 @@
-/*
- * GK20A Address Spaces
- *
- * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- */
-
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/uaccess.h>
-
-#include <trace/events/gk20a.h>
-
-#include <uapi/linux/nvgpu.h>
-
-#include <nvgpu/kmem.h>
-
-#include "gk20a.h"
-
-/* dumb allocator... */
-static int generate_as_share_id(struct gk20a_as *as)
-{
-	gk20a_dbg_fn("");
-	return ++as->last_share_id;
-}
-/* still dumb */
-static void release_as_share_id(struct gk20a_as *as, int id)
-{
-	gk20a_dbg_fn("");
-	return;
-}
-
-int gk20a_as_alloc_share(struct gk20a_as *as,
-			 u32 big_page_size, u32 flags,
-			 struct gk20a_as_share **out)
-{
-	struct gk20a *g = gk20a_from_as(as);
-	struct gk20a_as_share *as_share;
-	int err = 0;
-
-	gk20a_dbg_fn("");
-	g = gk20a_get(g);
-	if (!g)
-		return -ENODEV;
-
-	*out = NULL;
-	as_share = nvgpu_kzalloc(g, sizeof(*as_share));
-	if (!as_share)
-		return -ENOMEM;
-
-	as_share->as = as;
-	as_share->id = generate_as_share_id(as_share->as);
-
-	/* this will set as_share->vm. */
-	err = gk20a_busy(g);
-	if (err)
-		goto failed;
-	err = g->ops.mm.vm_alloc_share(as_share, big_page_size, flags);
-	gk20a_idle(g);
-
-	if (err)
-		goto failed;
-
-	*out = as_share;
-	return 0;
-
-failed:
-	nvgpu_kfree(g, as_share);
-	return err;
-}
-
-/*
- * channels and the device nodes call this to release.
- * once the ref_cnt hits zero the share is deleted.
- */
-int gk20a_as_release_share(struct gk20a_as_share *as_share)
-{
-	struct gk20a *g = as_share->vm->mm->g;
-	int err;
-
-	gk20a_dbg_fn("");
-
-	err = gk20a_busy(g);
-
-	if (err)
-		goto release_fail;
-
-	err = gk20a_vm_release_share(as_share);
-
-	gk20a_idle(g);
-
-release_fail:
-	release_as_share_id(as_share->as, as_share->id);
-	nvgpu_kfree(g, as_share);
-	gk20a_put(g);
-
-	return err;
-}
-
-static int gk20a_as_ioctl_bind_channel(
-		struct gk20a_as_share *as_share,
-		struct nvgpu_as_bind_channel_args *args)
-{
-	int err = 0;
-	struct channel_gk20a *ch;
-
-	gk20a_dbg_fn("");
-
-	ch = gk20a_get_channel_from_file(args->channel_fd);
-	if (!ch || gk20a_channel_as_bound(ch))
-		return -EINVAL;
-
-	/* this will set channel_gk20a->vm */
-	err = ch->g->ops.mm.vm_bind_channel(as_share, ch);
-	if (err)
-		return err;
-
-	return err;
-}
-
-static int gk20a_as_ioctl_alloc_space(
-		struct gk20a_as_share *as_share,
-		struct nvgpu_as_alloc_space_args *args)
-{
-	gk20a_dbg_fn("");
-	return gk20a_vm_alloc_space(as_share, args);
-}
-
-static int gk20a_as_ioctl_free_space(
-		struct gk20a_as_share *as_share,
-		struct nvgpu_as_free_space_args *args)
-{
-	gk20a_dbg_fn("");
-	return gk20a_vm_free_space(as_share, args);
-}
-
-static int gk20a_as_ioctl_map_buffer_ex(
-		struct gk20a_as_share *as_share,
-		struct nvgpu_as_map_buffer_ex_args *args)
-{
-	gk20a_dbg_fn("");
-
-	return gk20a_vm_map_buffer(as_share->vm, args->dmabuf_fd,
-			&args->offset, args->flags,
-			args->kind,
-			args->buffer_offset,
-			args->mapping_size,
-			NULL);
-}
-
-static int gk20a_as_ioctl_map_buffer(
-		struct gk20a_as_share *as_share,
-		struct nvgpu_as_map_buffer_args *args)
-{
-	gk20a_dbg_fn("");
-	return gk20a_vm_map_buffer(as_share->vm, args->dmabuf_fd,
-			&args->o_a.offset,
-			args->flags, NV_KIND_DEFAULT,
-			0, 0, NULL);
-	/* args->o_a.offset will be set if !err */
-}
-
-static int gk20a_as_ioctl_unmap_buffer(
-		struct gk20a_as_share *as_share,
-		struct nvgpu_as_unmap_buffer_args *args)
-{
-	gk20a_dbg_fn("");
-	return gk20a_vm_unmap_buffer(as_share->vm, args->offset, NULL);
-}
-
-static int gk20a_as_ioctl_map_buffer_batch(
-		struct gk20a_as_share *as_share,
-		struct nvgpu_as_map_buffer_batch_args *args)
-{
-	struct gk20a *g = as_share->vm->mm->g;
-	u32 i;
-	int err = 0;
-
-	struct nvgpu_as_unmap_buffer_args __user *user_unmap_args =
-		(struct nvgpu_as_unmap_buffer_args __user *)(uintptr_t)
-		args->unmaps;
-	struct nvgpu_as_map_buffer_ex_args __user *user_map_args =
-		(struct nvgpu_as_map_buffer_ex_args __user *)(uintptr_t)
-		args->maps;
-
-	struct vm_gk20a_mapping_batch batch;
-
-	gk20a_dbg_fn("");
-
-	if (args->num_unmaps > g->gpu_characteristics.map_buffer_batch_limit ||
-	    args->num_maps > g->gpu_characteristics.map_buffer_batch_limit)
-		return -EINVAL;
-
-	gk20a_vm_mapping_batch_start(&batch);
-
-	for (i = 0; i < args->num_unmaps; ++i) {
-		struct nvgpu_as_unmap_buffer_args unmap_args;
-
-		if (copy_from_user(&unmap_args, &user_unmap_args[i],
-				   sizeof(unmap_args))) {
-			err = -EFAULT;
-			break;
-		}
-
-		err = gk20a_vm_unmap_buffer(as_share->vm, unmap_args.offset,
-					    &batch);
-		if (err)
-			break;
-	}
-
-	if (err) {
-		gk20a_vm_mapping_batch_finish(as_share->vm, &batch);
-
-		args->num_unmaps = i;
-		args->num_maps = 0;
-		return err;
-	}
-
-	for (i = 0; i < args->num_maps; ++i) {
-		struct nvgpu_as_map_buffer_ex_args map_args;
-		memset(&map_args, 0, sizeof(map_args));
-
-		if (copy_from_user(&map_args, &user_map_args[i],
-				   sizeof(map_args))) {
-			err = -EFAULT;
-			break;
-		}
-
-		err = gk20a_vm_map_buffer(
-			as_share->vm, map_args.dmabuf_fd,
-			&map_args.offset, map_args.flags,
-			map_args.kind,
-			map_args.buffer_offset,
-			map_args.mapping_size,
-			&batch);
-		if (err)
-			break;
-	}
-
-	gk20a_vm_mapping_batch_finish(as_share->vm, &batch);
-
-	if (err)
-		args->num_maps = i;
-	/* note: args->num_unmaps will be unmodified, which is ok
-	 * since all unmaps are done */
-
-	return err;
-}
-
-static int gk20a_as_ioctl_get_va_regions(
-		struct gk20a_as_share *as_share,
-		struct nvgpu_as_get_va_regions_args *args)
-{
-	unsigned int i;
-	unsigned int write_entries;
-	struct nvgpu_as_va_region __user *user_region_ptr;
-	struct vm_gk20a *vm = as_share->vm;
-	unsigned int page_sizes = gmmu_page_size_kernel;
-
-	gk20a_dbg_fn("");
-
-	if (!vm->big_pages)
-		page_sizes--;
-
-	write_entries = args->buf_size / sizeof(struct nvgpu_as_va_region);
-	if (write_entries > page_sizes)
-		write_entries = page_sizes;
-
-	user_region_ptr =
-		(struct nvgpu_as_va_region __user *)(uintptr_t)args->buf_addr;
-
-	for (i = 0; i < write_entries; ++i) {
-		struct nvgpu_as_va_region region;
-		struct nvgpu_allocator *vma = vm->vma[i];
-
-		memset(&region, 0, sizeof(struct nvgpu_as_va_region));
-
-		region.page_size = vm->gmmu_page_sizes[i];
-		region.offset = nvgpu_alloc_base(vma);
-		/* No __aeabi_uldivmod() on some platforms... */
-		region.pages = (nvgpu_alloc_end(vma) -
-			nvgpu_alloc_base(vma)) >> ilog2(region.page_size);
-
-		if (copy_to_user(user_region_ptr + i, &region, sizeof(region)))
-			return -EFAULT;
-	}
-
-	args->buf_size =
-		page_sizes * sizeof(struct nvgpu_as_va_region);
-
-	return 0;
-}
-
-static int gk20a_as_ioctl_get_buffer_compbits_info(
-		struct gk20a_as_share *as_share,
-		struct nvgpu_as_get_buffer_compbits_info_args *args)
-{
-	gk20a_dbg_fn("");
-	return gk20a_vm_get_compbits_info(as_share->vm,
-					  args->mapping_gva,
-					  &args->compbits_win_size,
-					  &args->compbits_win_ctagline,
-					  &args->mapping_ctagline,
-					  &args->flags);
-}
-
-static int gk20a_as_ioctl_map_buffer_compbits(
-		struct gk20a_as_share *as_share,
-		struct nvgpu_as_map_buffer_compbits_args *args)
-{
-	gk20a_dbg_fn("");
-	return gk20a_vm_map_compbits(as_share->vm,
-				     args->mapping_gva,
-				     &args->compbits_win_gva,
-				     &args->mapping_iova,
-				     args->flags);
-}
-
-int gk20a_as_dev_open(struct inode *inode, struct file *filp)
-{
-	struct gk20a_as_share *as_share;
-	struct gk20a *g;
-	int err;
-
-	gk20a_dbg_fn("");
-
-	g = container_of(inode->i_cdev, struct gk20a, as.cdev);
-
-	err = gk20a_as_alloc_share(&g->as, 0, 0, &as_share);
-	if (err) {
-		gk20a_dbg_fn("failed to alloc share");
-		return err;
-	}
-
-	filp->private_data = as_share;
-	return 0;
-}
-
-int gk20a_as_dev_release(struct inode *inode, struct file *filp)
-{
-	struct gk20a_as_share *as_share = filp->private_data;
-
-	gk20a_dbg_fn("");
-
-	if (!as_share)
-		return 0;
-
-	return gk20a_as_release_share(as_share);
-}
-
-long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-	int err = 0;
-	struct gk20a_as_share *as_share = filp->private_data;
-	struct gk20a *g = gk20a_from_as(as_share->as);
-
-	u8 buf[NVGPU_AS_IOCTL_MAX_ARG_SIZE];
-
-	if ((_IOC_TYPE(cmd) != NVGPU_AS_IOCTL_MAGIC) ||
-	    (_IOC_NR(cmd) == 0) ||
-	    (_IOC_NR(cmd) > NVGPU_AS_IOCTL_LAST) ||
-	    (_IOC_SIZE(cmd) > NVGPU_AS_IOCTL_MAX_ARG_SIZE))
-		return -EINVAL;
-
-	memset(buf, 0, sizeof(buf));
-	if (_IOC_DIR(cmd) & _IOC_WRITE) {
-		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
-			return -EFAULT;
-	}
-
-	err = gk20a_busy(g);
-	if (err)
-		return err;
-
-	switch (cmd) {
-	case NVGPU_AS_IOCTL_BIND_CHANNEL:
-		trace_gk20a_as_ioctl_bind_channel(g->name);
-		err = gk20a_as_ioctl_bind_channel(as_share,
-			       (struct nvgpu_as_bind_channel_args *)buf);
-
-		break;
-	case NVGPU32_AS_IOCTL_ALLOC_SPACE:
-	{
-		struct nvgpu32_as_alloc_space_args *args32 =
-			(struct nvgpu32_as_alloc_space_args *)buf;
-		struct nvgpu_as_alloc_space_args args;
-
-		args.pages = args32->pages;
-		args.page_size = args32->page_size;
-		args.flags = args32->flags;
-		args.o_a.offset = args32->o_a.offset;
-		trace_gk20a_as_ioctl_alloc_space(g->name);
-		err = gk20a_as_ioctl_alloc_space(as_share, &args);
-		args32->o_a.offset = args.o_a.offset;
-		break;
-	}
-	case NVGPU_AS_IOCTL_ALLOC_SPACE:
-		trace_gk20a_as_ioctl_alloc_space(g->name);
-		err = gk20a_as_ioctl_alloc_space(as_share,
-				(struct nvgpu_as_alloc_space_args *)buf);
-		break;
-	case NVGPU_AS_IOCTL_FREE_SPACE:
-		trace_gk20a_as_ioctl_free_space(g->name);
-		err = gk20a_as_ioctl_free_space(as_share,
-				(struct nvgpu_as_free_space_args *)buf);
-		break;
-	case NVGPU_AS_IOCTL_MAP_BUFFER:
-		trace_gk20a_as_ioctl_map_buffer(g->name);
-		err = gk20a_as_ioctl_map_buffer(as_share,
-				(struct nvgpu_as_map_buffer_args *)buf);
-		break;
-	case NVGPU_AS_IOCTL_MAP_BUFFER_EX:
-		trace_gk20a_as_ioctl_map_buffer(g->name);
-		err = gk20a_as_ioctl_map_buffer_ex(as_share,
-				(struct nvgpu_as_map_buffer_ex_args *)buf);
-		break;
-	case NVGPU_AS_IOCTL_UNMAP_BUFFER:
-		trace_gk20a_as_ioctl_unmap_buffer(g->name);
-		err = gk20a_as_ioctl_unmap_buffer(as_share,
-				(struct nvgpu_as_unmap_buffer_args *)buf);
-		break;
-	case NVGPU_AS_IOCTL_GET_VA_REGIONS:
-		trace_gk20a_as_ioctl_get_va_regions(g->name);
-		err = gk20a_as_ioctl_get_va_regions(as_share,
-				(struct nvgpu_as_get_va_regions_args *)buf);
-		break;
-	case NVGPU_AS_IOCTL_GET_BUFFER_COMPBITS_INFO:
-		err = gk20a_as_ioctl_get_buffer_compbits_info(as_share,
-				(struct nvgpu_as_get_buffer_compbits_info_args *)buf);
-		break;
-	case NVGPU_AS_IOCTL_MAP_BUFFER_COMPBITS:
-		err = gk20a_as_ioctl_map_buffer_compbits(as_share,
-				(struct nvgpu_as_map_buffer_compbits_args *)buf);
-		break;
-	case NVGPU_AS_IOCTL_MAP_BUFFER_BATCH:
-		err = gk20a_as_ioctl_map_buffer_batch(as_share,
-				(struct nvgpu_as_map_buffer_batch_args *)buf);
-		break;
-	default:
-		dev_dbg(dev_from_gk20a(g), "unrecognized as ioctl: 0x%x", cmd);
-		err = -ENOTTY;
-		break;
-	}
-
-	gk20a_idle(g);
-
-	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
-		if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
-			err = -EFAULT;
-
-	return err;
-}
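
Note: the gk20a_as_dev_ioctl() handler removed above follows the stock Linux pattern for marshalling variable-size ioctl payloads through a single stack buffer: _IOC_SIZE(cmd) bounds the copy and _IOC_DIR(cmd) selects copy-in, copy-out, or both. A minimal standalone sketch of that pattern, with a hypothetical MYDRV_IOCTL_MAX_ARG_SIZE bound standing in for NVGPU_AS_IOCTL_MAX_ARG_SIZE:

	#include <linux/fs.h>
	#include <linux/uaccess.h>

	#define MYDRV_IOCTL_MAX_ARG_SIZE 256	/* hypothetical bound, not from nvgpu */

	static long mydrv_dev_ioctl(struct file *filp, unsigned int cmd,
				    unsigned long arg)
	{
		u8 buf[MYDRV_IOCTL_MAX_ARG_SIZE];
		long err = 0;

		/* Reject payloads that cannot fit the stack buffer. */
		if (_IOC_SIZE(cmd) > sizeof(buf))
			return -EINVAL;

		memset(buf, 0, sizeof(buf));

		/* _IOC_WRITE: userspace passes data into the kernel. */
		if (_IOC_DIR(cmd) & _IOC_WRITE)
			if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
				return -EFAULT;

		/* ... switch (cmd) and operate on the args decoded in buf ... */

		/* _IOC_READ: kernel copies results back out to userspace. */
		if (err == 0 && (_IOC_DIR(cmd) & _IOC_READ))
			if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
				err = -EFAULT;

		return err;
	}

The single copy-out at the end is why the per-command handlers above only fill in fields of *args: the dispatcher writes the whole buffer back to userspace once, after gk20a_idle().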
diff --git a/drivers/gpu/nvgpu/gk20a/as_gk20a.h b/drivers/gpu/nvgpu/gk20a/as_gk20a.h
deleted file mode 100644
index 9b0c6e14..00000000
--- a/drivers/gpu/nvgpu/gk20a/as_gk20a.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * GK20A Address Spaces
- *
- * Copyright (c) 2011-2015, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- */
-#ifndef AS_GK20A_H
-#define AS_GK20A_H
-
-#include <linux/atomic.h>
-#include <linux/cdev.h>
-#include <linux/fs.h>
-
-struct gk20a_as;
-struct gk20a_as_share;
-struct vm_gk20a;
-
-struct gk20a_as_share {
-	struct gk20a_as *as;
-	int id;
-	struct vm_gk20a *vm;
-};
-
-struct gk20a_as {
-	int last_share_id; /* dummy allocator for now */
-	struct cdev cdev;
-	struct device *node;
-};
-
-int gk20a_as_release_share(struct gk20a_as_share *as_share);
-
-/* struct file_operations driver interface */
-int gk20a_as_dev_open(struct inode *inode, struct file *filp);
-int gk20a_as_dev_release(struct inode *inode, struct file *filp);
-long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-
-/* if big_page_size == 0, the default big page size is used */
-int gk20a_as_alloc_share(struct gk20a_as *as, u32 big_page_size,
-			 u32 flags, struct gk20a_as_share **out);
-
-#endif
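
Note: the open/release/ioctl entry points declared above are what the driver attaches to the character device embedded in struct gk20a_as; gk20a_as_dev_open() recovers the owning struct gk20a via container_of() on inode->i_cdev. A hedged sketch of how such a node is typically registered — the fops table uses the real handler names, but my_as_cdev_add() and its devt argument are illustrative, not this driver's actual registration code:

	#include <linux/cdev.h>
	#include <linux/fs.h>
	#include <linux/module.h>

	static const struct file_operations gk20a_as_ops = {
		.owner          = THIS_MODULE,
		.open           = gk20a_as_dev_open,
		.release        = gk20a_as_dev_release,
		.unlocked_ioctl = gk20a_as_dev_ioctl,
	};

	/* Illustrative helper: bind the cdev embedded in struct gk20a_as
	 * to an already-allocated dev_t and make the node live. */
	static int my_as_cdev_add(struct gk20a_as *as, dev_t devt)
	{
		cdev_init(&as->cdev, &gk20a_as_ops);
		as->cdev.owner = THIS_MODULE;
		return cdev_add(&as->cdev, devt, 1);	/* one minor for the -as node */
	}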
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 451e32ca..1158add1 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -37,13 +37,14 @@ struct dbg_profiler_object_data;
 #include <soc/tegra/chip-id.h>
 #include <linux/version.h>
 #include <linux/atomic.h>
+#include <linux/cdev.h>
 
 #include "../../../arch/arm/mach-tegra/iomap.h"
 
 #include <nvgpu/pramin.h>
 #include <nvgpu/acr/nvgpu_acr.h>
+#include <nvgpu/as.h>
 
-#include "as_gk20a.h"
 #include "clk_gk20a.h"
 #include "ce2_gk20a.h"
 #include "fifo_gk20a.h"
@@ -1044,6 +1045,11 @@ struct gk20a {
 	struct {
 		struct cdev cdev;
 		struct device *node;
+	} as_dev;
+
+	struct {
+		struct cdev cdev;
+		struct device *node;
 	} dbg;
 
 	struct {
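
Note: after this change the cdev/node pair for the AS device lives in the new as_dev member of struct gk20a rather than inside struct gk20a_as, so an open() handler recovers the enclosing struct gk20a exactly as the deleted gk20a_as_dev_open() did, only through the renamed member. A sketch assuming the struct layout shown in the hunk above; the handler name and private_data use are illustrative:

	#include <linux/cdev.h>
	#include <linux/fs.h>

	static int as_dev_open_sketch(struct inode *inode, struct file *filp)
	{
		/* inode->i_cdev points at the cdev embedded in struct gk20a;
		 * container_of() walks back to the containing device struct. */
		struct gk20a *g = container_of(inode->i_cdev, struct gk20a,
					       as_dev.cdev);

		/* The real handler would allocate a gk20a_as_share here and
		 * stash it in filp->private_data, as the deleted code did. */
		filp->private_data = g;
		return 0;
	}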