Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_as.c')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/ioctl_as.c	33
1 file changed, 20 insertions(+), 13 deletions(-)
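
Every hunk in this diff applies the same conversion: the argument-less gk20a_dbg_fn() trace call is replaced by nvgpu_log_fn(), which takes a struct gk20a pointer so log output can be attributed to a specific GPU instance, and handlers that did not already hold that pointer derive it with gk20a_from_vm(). A minimal before/after sketch of the pattern, taken from the gk20a_as_ioctl_free_space() hunk below (all identifiers are the driver's own, as visible in the diff):

	/* Before: the trace macro carries no GPU context. */
	static int gk20a_as_ioctl_free_space(
			struct gk20a_as_share *as_share,
			struct nvgpu_as_free_space_args *args)
	{
		gk20a_dbg_fn("");
		return nvgpu_vm_area_free(as_share->vm, args->offset);
	}

	/* After: resolve the gk20a instance from the VM and pass it to the logger. */
	static int gk20a_as_ioctl_free_space(
			struct gk20a_as_share *as_share,
			struct nvgpu_as_free_space_args *args)
	{
		struct gk20a *g = gk20a_from_vm(as_share->vm);

		nvgpu_log_fn(g, " ");
		return nvgpu_vm_area_free(as_share->vm, args->offset);
	}
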
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
index e09e099b..41bbdfcb 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_as.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
@@ -50,8 +50,9 @@ static int gk20a_as_ioctl_bind_channel(
 {
 	int err = 0;
 	struct channel_gk20a *ch;
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	ch = gk20a_get_channel_from_file(args->channel_fd);
 	if (!ch)
@@ -76,7 +77,7 @@ static int gk20a_as_ioctl_alloc_space(
 {
 	struct gk20a *g = gk20a_from_vm(as_share->vm);
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 	return nvgpu_vm_area_alloc(as_share->vm, args->pages, args->page_size,
 				   &args->o_a.offset,
 				   gk20a_as_translate_linux_flags(g,
@@ -87,7 +88,9 @@ static int gk20a_as_ioctl_free_space(
 		struct gk20a_as_share *as_share,
 		struct nvgpu_as_free_space_args *args)
 {
-	gk20a_dbg_fn("");
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
+
+	nvgpu_log_fn(g, " ");
 	return nvgpu_vm_area_free(as_share->vm, args->offset);
 }
 
@@ -95,7 +98,9 @@ static int gk20a_as_ioctl_map_buffer_ex(
 		struct gk20a_as_share *as_share,
 		struct nvgpu_as_map_buffer_ex_args *args)
 {
-	gk20a_dbg_fn("");
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
+
+	nvgpu_log_fn(g, " ");
 
 	/* unsupported, direct kind control must be used */
 	if (!(args->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)) {
@@ -117,7 +122,9 @@ static int gk20a_as_ioctl_unmap_buffer(
 		struct gk20a_as_share *as_share,
 		struct nvgpu_as_unmap_buffer_args *args)
 {
-	gk20a_dbg_fn("");
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
+
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_vm_unmap(as_share->vm, args->offset, NULL);
 
@@ -128,6 +135,7 @@ static int gk20a_as_ioctl_map_buffer_batch(
 		struct gk20a_as_share *as_share,
 		struct nvgpu_as_map_buffer_batch_args *args)
 {
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
 	u32 i;
 	int err = 0;
 
@@ -140,7 +148,7 @@ static int gk20a_as_ioctl_map_buffer_batch(
 
 	struct vm_gk20a_mapping_batch batch;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (args->num_unmaps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT ||
 	    args->num_maps > NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT)
@@ -220,9 +228,10 @@ static int gk20a_as_ioctl_get_va_regions(
 	unsigned int write_entries;
 	struct nvgpu_as_va_region __user *user_region_ptr;
 	struct vm_gk20a *vm = as_share->vm;
+	struct gk20a *g = gk20a_from_vm(vm);
 	unsigned int page_sizes = gmmu_page_size_kernel;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (!vm->big_pages)
 		page_sizes--;
@@ -293,14 +302,14 @@ int gk20a_as_dev_open(struct inode *inode, struct file *filp)
 	struct gk20a *g;
 	int err;
 
-	gk20a_dbg_fn("");
-
 	l = container_of(inode->i_cdev, struct nvgpu_os_linux, as_dev.cdev);
 	g = &l->g;
 
+	nvgpu_log_fn(g, " ");
+
 	err = gk20a_as_alloc_share(g, 0, 0, &as_share);
 	if (err) {
-		gk20a_dbg_fn("failed to alloc share");
+		nvgpu_log_fn(g, "failed to alloc share");
 		return err;
 	}
 
@@ -312,8 +321,6 @@ int gk20a_as_dev_release(struct inode *inode, struct file *filp)
 {
 	struct gk20a_as_share *as_share = filp->private_data;
 
-	gk20a_dbg_fn("");
-
 	if (!as_share)
 		return 0;
 
@@ -328,7 +335,7 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 	u8 buf[NVGPU_AS_IOCTL_MAX_ARG_SIZE];
 
-	gk20a_dbg_fn("start %d", _IOC_NR(cmd));
+	nvgpu_log_fn(g, "start %d", _IOC_NR(cmd));
 
 	if ((_IOC_TYPE(cmd) != NVGPU_AS_IOCTL_MAGIC) ||
 		(_IOC_NR(cmd) == 0) ||